author     Matthew Heon <matthew.heon@gmail.com>  2017-11-01 11:24:59 -0400
committer  Matthew Heon <matthew.heon@gmail.com>  2017-11-01 11:24:59 -0400
commit     a031b83a09a8628435317a03f199cdc18b78262f (patch)
tree       bc017a96769ce6de33745b8b0b1304ccf38e9df0 /vendor/github.com
parent     2b74391cd5281f6fdf391ff8ad50fd1490f6bf89 (diff)
download   podman-a031b83a09a8628435317a03f199cdc18b78262f.tar.gz
           podman-a031b83a09a8628435317a03f199cdc18b78262f.tar.bz2
           podman-a031b83a09a8628435317a03f199cdc18b78262f.zip
Initial checkin from CRI-O repo
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/LICENSE | 21
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/README.md | 12
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/constants.go | 188
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/context.go | 7
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/csi_entry_state.go | 49
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/csi_param_state.go | 38
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go | 36
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/escape_state.go | 47
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/event_handler.go | 90
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/ground_state.go | 24
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/osc_string_state.go | 31
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/parser.go | 136
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go | 103
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/parser_actions.go | 122
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/states.go | 71
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/utilities.go | 21
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/ansi.go | 182
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/api.go | 322
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go | 100
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go | 101
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go | 84
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go | 118
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/utilities.go | 9
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go | 726
-rw-r--r--  vendor/github.com/BurntSushi/toml/COPYING | 14
-rw-r--r--  vendor/github.com/BurntSushi/toml/README.md | 220
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode.go | 505
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode_meta.go | 122
-rw-r--r--  vendor/github.com/BurntSushi/toml/doc.go | 27
-rw-r--r--  vendor/github.com/BurntSushi/toml/encode.go | 549
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types.go | 19
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types_1.1.go | 18
-rw-r--r--  vendor/github.com/BurntSushi/toml/lex.go | 871
-rw-r--r--  vendor/github.com/BurntSushi/toml/parse.go | 493
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_check.go | 91
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_fields.go | 241
-rw-r--r--  vendor/github.com/Microsoft/go-winio/LICENSE | 22
-rw-r--r--  vendor/github.com/Microsoft/go-winio/README.md | 22
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE | 27
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/common.go | 344
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/reader.go | 1002
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go | 20
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go | 20
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go | 32
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/writer.go | 444
-rw-r--r--  vendor/github.com/Microsoft/go-winio/backup.go | 280
-rw-r--r--  vendor/github.com/Microsoft/go-winio/backuptar/noop.go | 4
-rw-r--r--  vendor/github.com/Microsoft/go-winio/backuptar/tar.go | 439
-rw-r--r--  vendor/github.com/Microsoft/go-winio/ea.go | 137
-rw-r--r--  vendor/github.com/Microsoft/go-winio/file.go | 310
-rw-r--r--  vendor/github.com/Microsoft/go-winio/fileinfo.go | 60
-rw-r--r--  vendor/github.com/Microsoft/go-winio/pipe.go | 404
-rw-r--r--  vendor/github.com/Microsoft/go-winio/privilege.go | 202
-rw-r--r--  vendor/github.com/Microsoft/go-winio/reparse.go | 128
-rw-r--r--  vendor/github.com/Microsoft/go-winio/sd.go | 98
-rw-r--r--  vendor/github.com/Microsoft/go-winio/syscall.go | 3
-rw-r--r--  vendor/github.com/Microsoft/go-winio/zsyscall_windows.go | 528
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/LICENSE | 21
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/README.md | 12
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/activatelayer.go | 28
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/baselayer.go | 183
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/callback.go | 79
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/cgo.go | 7
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/container.go | 794
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/createlayer.go | 27
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go | 35
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/deactivatelayer.go | 26
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/destroylayer.go | 27
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/errors.go | 239
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go | 26
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/exportlayer.go | 156
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/getlayermountpath.go | 55
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go | 22
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/guid.go | 19
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/hcsshim.go | 166
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/hnsendpoint.go | 318
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/hnsfuncs.go | 40
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/hnsnetwork.go | 142
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/hnspolicy.go | 95
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/hnspolicylist.go | 196
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/importlayer.go | 212
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/interface.go | 187
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/layerexists.go | 30
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/layerutils.go | 111
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/legacy.go | 741
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/nametoguid.go | 20
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/preparelayer.go | 46
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/process.go | 384
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/processimage.go | 23
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/unpreparelayer.go | 27
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/utils.go | 33
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/version.go | 7
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/waithelper.go | 63
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/zhcsshim.go | 1042
-rw-r--r--  vendor/github.com/PuerkitoBio/purell/LICENSE | 12
-rw-r--r--  vendor/github.com/PuerkitoBio/purell/README.md | 187
-rw-r--r--  vendor/github.com/PuerkitoBio/purell/purell.go | 379
-rw-r--r--  vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27
-rw-r--r--  vendor/github.com/PuerkitoBio/urlesc/README.md | 16
-rw-r--r--  vendor/github.com/PuerkitoBio/urlesc/urlesc.go | 180
-rw-r--r--  vendor/github.com/beorn7/perks/LICENSE | 20
-rw-r--r--  vendor/github.com/beorn7/perks/README.md | 31
-rw-r--r--  vendor/github.com/beorn7/perks/quantile/stream.go | 292
-rw-r--r--  vendor/github.com/blang/semver/LICENSE | 22
-rw-r--r--  vendor/github.com/blang/semver/README.md | 191
-rw-r--r--  vendor/github.com/blang/semver/json.go | 23
-rw-r--r--  vendor/github.com/blang/semver/range.go | 416
-rw-r--r--  vendor/github.com/blang/semver/semver.go | 418
-rw-r--r--  vendor/github.com/blang/semver/sort.go | 28
-rw-r--r--  vendor/github.com/blang/semver/sql.go | 30
-rw-r--r--  vendor/github.com/buger/goterm/README.md | 119
-rw-r--r--  vendor/github.com/buger/goterm/box.go | 122
-rw-r--r--  vendor/github.com/buger/goterm/plot.go | 328
-rw-r--r--  vendor/github.com/buger/goterm/table.go | 34
-rw-r--r--  vendor/github.com/buger/goterm/terminal.go | 258
-rw-r--r--  vendor/github.com/buger/goterm/terminal_nosysioctl.go | 12
-rw-r--r--  vendor/github.com/buger/goterm/terminal_sysioctl.go | 36
-rw-r--r--  vendor/github.com/containerd/cgroups/LICENSE | 201
-rw-r--r--  vendor/github.com/containerd/cgroups/README.md | 112
-rw-r--r--  vendor/github.com/containerd/cgroups/blkio.go | 323
-rw-r--r--  vendor/github.com/containerd/cgroups/cgroup.go | 389
-rw-r--r--  vendor/github.com/containerd/cgroups/control.go | 58
-rw-r--r--  vendor/github.com/containerd/cgroups/cpu.go | 120
-rw-r--r--  vendor/github.com/containerd/cgroups/cpuacct.go | 112
-rw-r--r--  vendor/github.com/containerd/cgroups/cpuset.go | 139
-rw-r--r--  vendor/github.com/containerd/cgroups/devices.go | 74
-rw-r--r--  vendor/github.com/containerd/cgroups/errors.go | 31
-rw-r--r--  vendor/github.com/containerd/cgroups/freezer.go | 69
-rw-r--r--  vendor/github.com/containerd/cgroups/hierarchy.go | 4
-rw-r--r--  vendor/github.com/containerd/cgroups/hugetlb.go | 92
-rw-r--r--  vendor/github.com/containerd/cgroups/memory.go | 304
-rw-r--r--  vendor/github.com/containerd/cgroups/named.go | 23
-rw-r--r--  vendor/github.com/containerd/cgroups/net_cls.go | 42
-rw-r--r--  vendor/github.com/containerd/cgroups/net_prio.go | 50
-rw-r--r--  vendor/github.com/containerd/cgroups/paths.go | 88
-rw-r--r--  vendor/github.com/containerd/cgroups/perf_event.go | 21
-rw-r--r--  vendor/github.com/containerd/cgroups/pids.go | 69
-rw-r--r--  vendor/github.com/containerd/cgroups/state.go | 12
-rw-r--r--  vendor/github.com/containerd/cgroups/stats.go | 109
-rw-r--r--  vendor/github.com/containerd/cgroups/subsystem.go | 94
-rw-r--r--  vendor/github.com/containerd/cgroups/systemd.go | 101
-rw-r--r--  vendor/github.com/containerd/cgroups/ticks.go | 10
-rw-r--r--  vendor/github.com/containerd/cgroups/utils.go | 280
-rw-r--r--  vendor/github.com/containerd/cgroups/v1.go | 65
-rw-r--r--  vendor/github.com/containernetworking/cni/LICENSE | 202
-rw-r--r--  vendor/github.com/containernetworking/cni/README.md | 176
-rw-r--r--  vendor/github.com/containernetworking/cni/libcni/api.go | 219
-rw-r--r--  vendor/github.com/containernetworking/cni/libcni/conf.go | 256
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/args.go | 82
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go | 53
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/exec.go | 95
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/find.go | 43
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go | 20
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go | 18
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go | 59
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/020/types.go | 135
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/args.go | 112
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/current/types.go | 300
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/types.go | 189
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/version/conf.go | 37
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/version/plugin.go | 81
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/version/reconcile.go | 49
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/version/version.go | 61
-rw-r--r--  vendor/github.com/containernetworking/plugins/pkg/ns/README.md | 40
-rw-r--r--  vendor/github.com/containernetworking/plugins/pkg/ns/ns.go | 178
-rw-r--r--  vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go | 149
-rw-r--r--  vendor/github.com/containernetworking/plugins/pkg/ns/ns_unspecified.go | 36
-rw-r--r--  vendor/github.com/containers/image/LICENSE | 189
-rw-r--r--  vendor/github.com/containers/image/README.md | 79
-rw-r--r--  vendor/github.com/containers/image/copy/copy.go | 670
-rw-r--r--  vendor/github.com/containers/image/copy/manifest.go | 102
-rw-r--r--  vendor/github.com/containers/image/copy/progress_reader.go | 28
-rw-r--r--  vendor/github.com/containers/image/copy/sign.go | 35
-rw-r--r--  vendor/github.com/containers/image/directory/directory_dest.go | 149
-rw-r--r--  vendor/github.com/containers/image/directory/directory_src.go | 81
-rw-r--r--  vendor/github.com/containers/image/directory/directory_transport.go | 177
-rw-r--r--  vendor/github.com/containers/image/directory/explicitfilepath/path.go | 56
-rw-r--r--  vendor/github.com/containers/image/docker/archive/dest.go | 66
-rw-r--r--  vendor/github.com/containers/image/docker/archive/src.go | 41
-rw-r--r--  vendor/github.com/containers/image/docker/archive/transport.go | 153
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/daemon_dest.go | 128
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/daemon_src.go | 90
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/daemon_transport.go | 182
-rw-r--r--  vendor/github.com/containers/image/docker/docker_client.go | 430
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image.go | 63
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_dest.go | 457
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go | 379
-rw-r--r--  vendor/github.com/containers/image/docker/docker_transport.go | 160
-rw-r--r--  vendor/github.com/containers/image/docker/lookaside.go | 202
-rw-r--r--  vendor/github.com/containers/image/docker/policyconfiguration/naming.go | 56
-rw-r--r--  vendor/github.com/containers/image/docker/reference/README.md | 2
-rw-r--r--  vendor/github.com/containers/image/docker/reference/helpers.go | 42
-rw-r--r--  vendor/github.com/containers/image/docker/reference/normalize.go | 152
-rw-r--r--  vendor/github.com/containers/image/docker/reference/reference.go | 433
-rw-r--r--  vendor/github.com/containers/image/docker/reference/regexp.go | 143
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/dest.go | 258
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/doc.go | 3
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/src.go | 360
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/types.go | 40
-rw-r--r--  vendor/github.com/containers/image/docker/wwwauthenticate.go | 159
-rw-r--r--  vendor/github.com/containers/image/image/docker_list.go | 63
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema1.go | 245
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema2.go | 353
-rw-r--r--  vendor/github.com/containers/image/image/manifest.go | 118
-rw-r--r--  vendor/github.com/containers/image/image/memory.go | 80
-rw-r--r--  vendor/github.com/containers/image/image/oci.go | 195
-rw-r--r--  vendor/github.com/containers/image/image/sourced.go | 94
-rw-r--r--  vendor/github.com/containers/image/image/unparsed.go | 92
-rw-r--r--  vendor/github.com/containers/image/manifest/docker_schema1.go | 212
-rw-r--r--  vendor/github.com/containers/image/manifest/docker_schema2.go | 241
-rw-r--r--  vendor/github.com/containers/image/manifest/manifest.go | 217
-rw-r--r--  vendor/github.com/containers/image/manifest/oci.go | 108
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_dest.go | 131
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_src.go | 92
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_transport.go | 225
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_dest.go | 233
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_src.go | 147
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_transport.go | 277
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift-copies.go | 1174
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift.go | 539
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift_transport.go | 155
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_dest.go | 332
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_transport.go | 226
-rw-r--r--  vendor/github.com/containers/image/pkg/compression/compression.go | 67
-rw-r--r--  vendor/github.com/containers/image/pkg/docker/config/config.go | 295
-rw-r--r--  vendor/github.com/containers/image/pkg/strslice/README.md | 1
-rw-r--r--  vendor/github.com/containers/image/pkg/strslice/strslice.go | 30
-rw-r--r--  vendor/github.com/containers/image/pkg/sysregistries/system_registries.go | 86
-rw-r--r--  vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go | 102
-rw-r--r--  vendor/github.com/containers/image/signature/docker.go | 65
-rw-r--r--  vendor/github.com/containers/image/signature/json.go | 88
-rw-r--r--  vendor/github.com/containers/image/signature/mechanism.go | 85
-rw-r--r--  vendor/github.com/containers/image/signature/mechanism_gpgme.go | 175
-rw-r--r--  vendor/github.com/containers/image/signature/mechanism_openpgp.go | 159
-rw-r--r--  vendor/github.com/containers/image/signature/policy_config.go | 684
-rw-r--r--  vendor/github.com/containers/image/signature/policy_eval.go | 289
-rw-r--r--  vendor/github.com/containers/image/signature/policy_eval_baselayer.go | 18
-rw-r--r--  vendor/github.com/containers/image/signature/policy_eval_signedby.go | 131
-rw-r--r--  vendor/github.com/containers/image/signature/policy_eval_simple.go | 28
-rw-r--r--  vendor/github.com/containers/image/signature/policy_reference_match.go | 101
-rw-r--r--  vendor/github.com/containers/image/signature/policy_types.go | 152
-rw-r--r--  vendor/github.com/containers/image/signature/signature.go | 280
-rw-r--r--  vendor/github.com/containers/image/storage/storage_image.go | 878
-rw-r--r--  vendor/github.com/containers/image/storage/storage_reference.go | 178
-rw-r--r--  vendor/github.com/containers/image/storage/storage_transport.go | 442
-rw-r--r--  vendor/github.com/containers/image/transports/alltransports/alltransports.go | 34
-rw-r--r--  vendor/github.com/containers/image/transports/alltransports/ostree.go | 8
-rw-r--r--  vendor/github.com/containers/image/transports/alltransports/ostree_stub.go | 9
-rw-r--r--  vendor/github.com/containers/image/transports/stub.go | 36
-rw-r--r--  vendor/github.com/containers/image/transports/transports.go | 90
-rw-r--r--  vendor/github.com/containers/image/types/types.go | 354
-rw-r--r--  vendor/github.com/containers/image/vendor.conf | 40
-rw-r--r--  vendor/github.com/containers/image/version/version.go | 18
-rw-r--r--  vendor/github.com/containers/storage/LICENSE | 191
-rw-r--r--  vendor/github.com/containers/storage/NOTICE | 19
-rw-r--r--  vendor/github.com/containers/storage/README.md | 43
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 531
-rw-r--r--  vendor/github.com/containers/storage/containers_ffjson.go | 1194
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/aufs.go | 691
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/dirs.go | 64
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/mount.go | 21
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/mount_linux.go | 7
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go | 12
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 677
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/version.go | 26
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/version_none.go | 14
-rw-r--r--  vendor/github.com/containers/storage/drivers/counter.go | 59
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/device_setup.go | 236
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 2825
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go | 106
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/driver.go | 240
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/mount.go | 88
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 285
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_freebsd.go | 23
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_linux.go | 135
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_solaris.go | 96
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_unsupported.go | 15
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_windows.go | 14
-rw-r--r--  vendor/github.com/containers/storage/drivers/fsdiff.go | 169
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/check.go | 102
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/mount.go | 89
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 775
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/randomid.go | 81
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go | 18
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota.go | 337
-rw-r--r--  vendor/github.com/containers/storage/drivers/register/register_aufs.go | 8
-rw-r--r--  vendor/github.com/containers/storage/drivers/register/register_btrfs.go | 8
-rw-r--r--  vendor/github.com/containers/storage/drivers/register/register_devicemapper.go | 8
-rw-r--r--  vendor/github.com/containers/storage/drivers/register/register_overlay.go | 8
-rw-r--r--  vendor/github.com/containers/storage/drivers/register/register_vfs.go | 6
-rw-r--r--  vendor/github.com/containers/storage/drivers/register/register_windows.go | 6
-rw-r--r--  vendor/github.com/containers/storage/drivers/register/register_zfs.go | 8
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/driver.go | 155
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 965
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs.go | 426
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go | 39
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go | 28
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go | 59
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go | 11
-rw-r--r--  vendor/github.com/containers/storage/errors.go | 56
-rw-r--r--  vendor/github.com/containers/storage/images.go | 576
-rw-r--r--  vendor/github.com/containers/storage/images_ffjson.go | 1148
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 1040
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go | 1713
-rw-r--r--  vendor/github.com/containers/storage/lockfile.go | 184
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/README.md | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 1251
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_linux.go | 92
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_other.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_unix.go | 122
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_windows.go | 79
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes.go | 441
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_linux.go | 313
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_other.go | 97
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_unix.go | 37
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_windows.go | 30
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/copy.go | 461
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/copy_unix.go | 11
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/copy_windows.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/diff.go | 256
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/time_linux.go | 16
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/time_unsupported.go | 16
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/whiteouts.go | 23
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/wrap.go | 59
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 70
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | 86
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go | 22
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go | 108
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff.go | 23
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go | 130
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go | 45
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go | 28
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go | 821
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go | 121
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go | 252
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go | 31
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go | 15
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go | 28
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/log.go | 11
-rw-r--r--  vendor/github.com/containers/storage/pkg/directory/directory.go | 26
-rw-r--r--  vendor/github.com/containers/storage/pkg/directory/directory_unix.go | 48
-rw-r--r--  vendor/github.com/containers/storage/pkg/directory/directory_windows.go | 37
-rw-r--r--  vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go | 20
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils.go | 298
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go | 27
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go | 22
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go | 88
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go | 23
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_others.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go | 34
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go | 24
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools.go | 279
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go | 204
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go | 25
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go | 164
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/utils_unix.go | 32
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/buffer.go | 51
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go | 186
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/fswriters.go | 162
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/readers.go | 154
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go | 10
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go | 18
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go | 92
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/writers.go | 66
-rw-r--r--  vendor/github.com/containers/storage/pkg/locker/README.md | 65
-rw-r--r--  vendor/github.com/containers/storage/pkg/locker/locker.go | 112
-rw-r--r--  vendor/github.com/containers/storage/pkg/longpath/longpath.go | 26
-rw-r--r--  vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go | 157
-rw-r--r--  vendor/github.com/containers/storage/pkg/loopback/ioctl.go | 53
-rw-r--r--  vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go | 52
-rw-r--r--  vendor/github.com/containers/storage/pkg/loopback/loopback.go | 63
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags.go | 149
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go | 49
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags_linux.go | 87
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go | 31
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mount.go | 106
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go | 60
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mounter_linux.go | 57
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go | 34
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go | 11
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo.go | 54
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go | 41
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go | 95
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go | 37
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go | 69
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go | 58
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go | 74
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go | 56
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go | 45
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go | 70
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go | 17
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go | 14
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go | 18
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/parsers.go | 69
-rw-r--r--  vendor/github.com/containers/storage/pkg/pools/pools.go | 119
-rw-r--r--  vendor/github.com/containers/storage/pkg/promise/promise.go | 11
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/README.md | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_linux.go | 30
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_unix.go | 23
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_windows.go | 23
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/reexec.go | 47
-rw-r--r--  vendor/github.com/containers/storage/pkg/stringid/README.md | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/stringid/stringid.go | 99
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/chtimes.go | 35
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/chtimes_unix.go | 14
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/chtimes_windows.go | 28
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/errors.go | 10
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/exitcode.go | 33
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/filesys.go | 67
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/filesys_windows.go | 298
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/init.go | 22
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/init_windows.go | 17
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/lcow_unix.go | 8
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/lcow_windows.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/lstat_unix.go | 19
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/lstat_windows.go | 14
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo.go | 17
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo_linux.go | 65
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go | 129
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go | 8
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo_windows.go | 45
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/mknod.go | 22
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/mknod_windows.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/path.go | 21
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/path_unix.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/path_windows.go | 33
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/process_unix.go | 24
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/rm.go | 80
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_darwin.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_freebsd.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_linux.go | 19
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_openbsd.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_solaris.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_unix.go | 60
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_windows.go | 49
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/syscall_unix.go | 17
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/syscall_windows.go | 122
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/umask.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/umask_windows.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go | 24
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/utimes_linux.go | 25
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go | 10
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/xattrs_linux.go | 29
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/truncindex/truncindex.go | 137
-rw-r--r--  vendor/github.com/containers/storage/stat_mtim.go | 11
-rw-r--r--  vendor/github.com/containers/storage/stat_mtimespec.go | 11
-rw-r--r--  vendor/github.com/containers/storage/store.go | 2390
-rw-r--r--  vendor/github.com/containers/storage/vendor.conf | 21
-rw-r--r--  vendor/github.com/coreos/go-systemd/LICENSE | 191
-rw-r--r--  vendor/github.com/coreos/go-systemd/README.md | 54
-rw-r--r--  vendor/github.com/coreos/go-systemd/daemon/sdnotify.go | 63
-rw-r--r--  vendor/github.com/coreos/go-systemd/daemon/watchdog.go | 72
-rw-r--r--  vendor/github.com/coreos/go-systemd/dbus/dbus.go | 213
-rw-r--r--  vendor/github.com/coreos/go-systemd/dbus/methods.go | 565
-rw-r--r--  vendor/github.com/coreos/go-systemd/dbus/properties.go | 237
-rw-r--r--  vendor/github.com/coreos/go-systemd/dbus/set.go | 47
-rw-r--r--  vendor/github.com/coreos/go-systemd/dbus/subscription.go | 250
-rw-r--r--  vendor/github.com/coreos/go-systemd/dbus/subscription_set.go | 57
-rw-r--r--  vendor/github.com/coreos/go-systemd/util/util.go | 90
-rw-r--r--  vendor/github.com/coreos/go-systemd/util/util_cgo.go | 174
-rw-r--r--  vendor/github.com/coreos/go-systemd/util/util_stub.go | 23
-rw-r--r--  vendor/github.com/coreos/pkg/LICENSE | 202
-rw-r--r--  vendor/github.com/coreos/pkg/NOTICE | 5
-rw-r--r--  vendor/github.com/coreos/pkg/README.md | 4
-rw-r--r--  vendor/github.com/coreos/pkg/dlopen/dlopen.go | 82
-rw-r--r--  vendor/github.com/coreos/pkg/dlopen/dlopen_example.go | 56
-rw-r--r--  vendor/github.com/cri-o/ocicni/LICENSE | 191
-rw-r--r--  vendor/github.com/cri-o/ocicni/README.md | 3
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/noop.go | 24
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go | 421
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go | 62
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/util.go | 32
-rw-r--r--  vendor/github.com/davecgh/go-spew/LICENSE | 15
-rw-r--r--  vendor/github.com/davecgh/go-spew/README.md | 205
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypass.go | 152
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypasssafe.go | 38
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/common.go | 341
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/config.go | 306
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/doc.go | 211
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/dump.go | 509
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/format.go | 419
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/spew.go | 148
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/LICENSE | 7
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/README.md | 85
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/claims.go | 134
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/doc.go | 4
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 147
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go | 67
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/errors.go | 63
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/hmac.go | 94
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/map_claims.go | 94
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/none.go | 52
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/parser.go | 128
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/rsa.go | 100
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/rsa_utils.go | 69
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/signing_method.go | 35
-rw-r--r--  vendor/github.com/dgrijalva/jwt-go/token.go | 108
-rw-r--r--  vendor/github.com/docker/distribution/LICENSE | 202
-rw-r--r--  vendor/github.com/docker/distribution/README.md | 130
-rw-r--r--  vendor/github.com/docker/distribution/blobs.go | 257
-rw-r--r--  vendor/github.com/docker/distribution/context/context.go | 85
-rw-r--r--  vendor/github.com/docker/distribution/context/doc.go | 89
-rw-r--r--  vendor/github.com/docker/distribution/context/http.go | 366
-rw-r--r--  vendor/github.com/docker/distribution/context/logger.go | 116
-rw-r--r--  vendor/github.com/docker/distribution/context/trace.go | 104
-rw-r--r--  vendor/github.com/docker/distribution/context/util.go | 24
-rw-r--r--  vendor/github.com/docker/distribution/context/version.go | 16
-rw-r--r--  vendor/github.com/docker/distribution/digestset/set.go | 247
-rw-r--r--  vendor/github.com/docker/distribution/doc.go | 7
-rw-r--r--  vendor/github.com/docker/distribution/errors.go | 115
-rw-r--r--  vendor/github.com/docker/distribution/manifests.go | 125
-rw-r--r--  vendor/github.com/docker/distribution/reference/helpers.go | 42
-rw-r--r--  vendor/github.com/docker/distribution/reference/normalize.go | 170
-rw-r--r--  vendor/github.com/docker/distribution/reference/reference.go | 433
-rw-r--r--  vendor/github.com/docker/distribution/reference/regexp.go | 143
-rw-r--r--  vendor/github.com/docker/distribution/registry.go | 97
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/errcode/errors.go | 267
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/errcode/handler.go | 44
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/errcode/register.go | 138
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/descriptors.go | 1596
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/doc.go | 9
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/errors.go | 136
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/headerparser.go | 161
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/routes.go | 49
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/urls.go | 266
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go | 27
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go | 237
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/blob_writer.go | 162
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/errors.go | 139
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/repository.go | 853
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/transport/http_reader.go | 251
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/transport/transport.go | 147
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/cache.go | 35
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go | 101
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go | 179
-rw-r--r--  vendor/github.com/docker/distribution/tags.go | 27
-rw-r--r--  vendor/github.com/docker/distribution/uuid/uuid.go | 126
-rw-r--r--  vendor/github.com/docker/distribution/vendor.conf | 43
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/LICENSE | 20
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/README.md | 82
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/client/client.go | 121
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/client/command.go | 56
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go | 186
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/error.go | 102
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/helper.go | 14
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/version.go | 4
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c | 228
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go | 196
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h | 14
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go | 13
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go | 41
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c | 162
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go | 118
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h | 13
-rw-r--r--  vendor/github.com/docker/docker/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/docker/NOTICE | 19
-rw-r--r--  vendor/github.com/docker/docker/README.md | 90
-rw-r--r--  vendor/github.com/docker/docker/api/README.md | 42
-rw-r--r--  vendor/github.com/docker/docker/api/common.go | 65
-rw-r--r--  vendor/github.com/docker/docker/api/common_unix.go | 6
-rw-r--r--  vendor/github.com/docker/docker/api/common_windows.go | 8
-rw-r--r--  vendor/github.com/docker/docker/api/names.go | 9
-rw-r--r--  vendor/github.com/docker/docker/api/types/auth.go | 22
-rw-r--r--  vendor/github.com/docker/docker/api/types/blkiodev/blkio.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/client.go | 389
-rw-r--r--  vendor/github.com/docker/docker/api/types/configs.go | 70
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/config.go | 69
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_changes.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_create.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_top.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_update.go | 17
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_wait.go | 17
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/host_config.go | 380
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go | 41
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go | 54
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/waitcondition.go | 22
-rw-r--r--  vendor/github.com/docker/docker/api/types/error_response.go | 13
-rw-r--r--  vendor/github.com/docker/docker/api/types/events/events.go | 52
-rw-r--r--  vendor/github.com/docker/docker/api/types/filters/parse.go | 310
-rw-r--r--  vendor/github.com/docker/docker/api/types/graph_driver_data.go | 17
-rw-r--r--  vendor/github.com/docker/docker/api/types/id_response.go | 13
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/image_history.go | 37
-rw-r--r--  vendor/github.com/docker/docker/api/types/image_delete_response_item.go | 15
-rw-r--r--  vendor/github.com/docker/docker/api/types/image_summary.go | 49
-rw-r--r--  vendor/github.com/docker/docker/api/types/mount/mount.go | 128
-rw-r--r--  vendor/github.com/docker/docker/api/types/network/network.go | 108
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin.go | 200
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_device.go | 25
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_env.go | 25
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_interface_type.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_mount.go | 37
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_responses.go | 71
-rw-r--r--  vendor/github.com/docker/docker/api/types/port.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/authenticate.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/registry.go | 119
-rw-r--r--  vendor/github.com/docker/docker/api/types/seccomp.go | 93
-rw-r--r--  vendor/github.com/docker/docker/api/types/service_update_response.go | 12
-rw-r--r--  vendor/github.com/docker/docker/api/types/stats.go | 181
-rw-r--r--  vendor/github.com/docker/docker/api/types/strslice/strslice.go | 30
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/common.go | 40
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/config.go | 31
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/container.go | 72
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/network.go | 119
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/node.go | 115
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime.go | 19
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go | 3
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go | 712
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto | 18
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/secret.go | 32
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/service.go | 124
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/swarm.go | 217
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/task.go | 184
-rw-r--r--  vendor/github.com/docker/docker/api/types/time/duration_convert.go | 12
-rw-r--r--  vendor/github.com/docker/docker/api/types/time/timestamp.go | 124
-rw-r--r--  vendor/github.com/docker/docker/api/types/types.go | 575
-rw-r--r--  vendor/github.com/docker/docker/api/types/versions/README.md | 14
-rw-r--r--  vendor/github.com/docker/docker/api/types/versions/compare.go | 62
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume.go | 69
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volumes_create.go | 29
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volumes_list.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/README.md | 35
-rw-r--r--  vendor/github.com/docker/docker/client/build_prune.go | 30
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_create.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_delete.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_list.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/client.go | 314
-rw-r--r--  vendor/github.com/docker/docker/client/client_unix.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/client_windows.go | 4
-rw-r--r--  vendor/github.com/docker/docker/client/config_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/config_inspect.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/config_list.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/config_remove.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/config_update.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/container_attach.go | 57
-rw-r--r--  vendor/github.com/docker/docker/client/container_commit.go | 55
-rw-r--r--  vendor/github.com/docker/docker/client/container_copy.go | 102
-rw-r--r--  vendor/github.com/docker/docker/client/container_create.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/container_diff.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/container_exec.go | 54
-rw-r--r--  vendor/github.com/docker/docker/client/container_export.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/container_inspect.go | 54
-rw-r--r--  vendor/github.com/docker/docker/client/container_kill.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/container_list.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/container_logs.go | 72
-rw-r--r--  vendor/github.com/docker/docker/client/container_pause.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/container_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/container_remove.go | 27
-rw-r--r--  vendor/github.com/docker/docker/client/container_rename.go | 16
-rw-r--r--  vendor/github.com/docker/docker/client/container_resize.go | 29
-rw-r--r--  vendor/github.com/docker/docker/client/container_restart.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/container_start.go | 24
-rw-r--r--  vendor/github.com/docker/docker/client/container_stats.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/container_stop.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/container_top.go | 28
-rw-r--r--  vendor/github.com/docker/docker/client/container_unpause.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/container_update.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/container_wait.go | 84
-rw-r--r--  vendor/github.com/docker/docker/client/disk_usage.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/distribution_inspect.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/errors.go | 300
-rw-r--r--  vendor/github.com/docker/docker/client/events.go | 102
-rw-r--r--  vendor/github.com/docker/docker/client/hijack.go | 208
-rw-r--r--  vendor/github.com/docker/docker/client/image_build.go | 128
-rw-r--r--  vendor/github.com/docker/docker/client/image_create.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/image_history.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/image_import.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/image_list.go | 45
-rw-r--r--  vendor/github.com/docker/docker/client/image_load.go | 30
-rw-r--r--  vendor/github.com/docker/docker/client/image_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/image_pull.go | 61
-rw-r--r--  vendor/github.com/docker/docker/client/image_push.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/image_remove.go | 31
-rw-r--r--  vendor/github.com/docker/docker/client/image_save.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/image_search.go | 51
-rw-r--r--  vendor/github.com/docker/docker/client/image_tag.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/info.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/interface.go | 194
-rw-r--r--  vendor/github.com/docker/docker/client/interface_experimental.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/interface_stable.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/login.go | 29
-rw-r--r--  vendor/github.com/docker/docker/client/network_connect.go | 18
-rw-r--r--  vendor/github.com/docker/docker/client/network_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/network_disconnect.go | 14
-rw-r--r--  vendor/github.com/docker/docker/client/network_inspect.go | 50
-rw-r--r--  vendor/github.com/docker/docker/client/network_list.go | 31
-rw-r--r--  vendor/github.com/docker/docker/client/network_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/network_remove.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/node_inspect.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/node_list.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/node_remove.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/node_update.go | 18
-rw-r--r--  vendor/github.com/docker/docker/client/parse_logs.go | 41
-rw-r--r--  vendor/github.com/docker/docker/client/ping.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_create.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_disable.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_enable.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_inspect.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_install.go | 113
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_list.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_push.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_remove.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_set.go | 12
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_upgrade.go | 39
-rw-r--r--  vendor/github.com/docker/docker/client/request.go | 262
-rw-r--r--  vendor/github.com/docker/docker/client/secret_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/secret_inspect.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/secret_list.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/secret_remove.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/secret_update.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/service_create.go | 156
-rw-r--r--  vendor/github.com/docker/docker/client/service_inspect.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/service_list.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/service_logs.go | 52
-rw-r--r--  vendor/github.com/docker/docker/client/service_remove.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/service_update.go | 92
-rw-r--r--  vendor/github.com/docker/docker/client/session.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_get_unlock_key.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_init.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_inspect.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_join.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_leave.go | 18
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_unlock.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_update.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/task_inspect.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/task_list.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/task_logs.go | 52
-rw-r--r--  vendor/github.com/docker/docker/client/transport.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/utils.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/version.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/volume_create.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/volume_inspect.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/volume_list.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/volume_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/volume_remove.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/README.md | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go | 23
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_others.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go | 34
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go | 24
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools.go | 279
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go | 204
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go | 25
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go | 164
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go | 12
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/utils_unix.go | 32
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/buffer.go | 51
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go | 186
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/fswriters.go | 162
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/readers.go | 154
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go | 18
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go | 92
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/writers.go | 66
-rw-r--r--  vendor/github.com/docker/docker/pkg/longpath/longpath.go | 26
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags.go | 149
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go | 49
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_linux.go | 87
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go | 31
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mount.go | 86
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go | 60
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_linux.go | 57
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo.go | 54
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go | 95
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go | 37
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go | 12
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go | 69
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go | 58
-rw-r--r--  vendor/github.com/docker/docker/pkg/pools/pools.go | 137
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal.go | 54
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_darwin.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go | 43
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_linux.go | 82
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_solaris.go | 42
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_unix.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_windows.go | 28
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/trap.go | 104
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringid/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringid/stringid.go | 99
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringutils/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringutils/stringutils.go | 99
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE | 191
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD | 27
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/README.md | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/fs.go | 144
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/fs_unix.go | 15
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/fs_windows.go | 169
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/chtimes.go | 35
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/chtimes_unix.go | 14
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/chtimes_windows.go | 28
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/errors.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/events_windows.go | 85
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/exitcode.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/filesys.go | 67
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/filesys_windows.go | 298
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_windows.go | 17
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow_unix.go | 8
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lstat_unix.go | 19
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lstat_windows.go | 14
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo.go | 17
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_linux.go | 65
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go | 129
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go | 8
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_windows.go | 45
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/mknod.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/mknod_windows.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_windows.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/process_unix.go | 24
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/rm.go | 80
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_darwin.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_freebsd.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_linux.go | 19
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_openbsd.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_solaris.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_unix.go | 60
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_windows.go | 49
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/syscall_unix.go | 17
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/syscall_windows.go | 122
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/umask.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/umask_windows.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go | 24
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/utimes_linux.go | 25
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/xattrs_linux.go | 29
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/ascii.go | 66
-rw-r--r--vendor/github.com/docker/docker/pkg/term/proxy.go74
-rw-r--r--vendor/github.com/docker/docker/pkg/term/tc.go21
-rw-r--r--vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go65
-rw-r--r--vendor/github.com/docker/docker/pkg/term/term.go124
-rw-r--r--vendor/github.com/docker/docker/pkg/term/term_windows.go237
-rw-r--r--vendor/github.com/docker/docker/pkg/term/termios_bsd.go42
-rw-r--r--vendor/github.com/docker/docker/pkg/term/termios_linux.go37
-rw-r--r--vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go263
-rw-r--r--vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go64
-rw-r--r--vendor/github.com/docker/docker/pkg/term/windows/console.go35
-rw-r--r--vendor/github.com/docker/docker/pkg/term/windows/windows.go33
-rw-r--r--vendor/github.com/docker/docker/pkg/term/winsize.go30
-rw-r--r--vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go42
-rw-r--r--vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go11
-rw-r--r--vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go33
-rw-r--r--vendor/github.com/docker/docker/pkg/truncindex/truncindex.go139
-rw-r--r--vendor/github.com/docker/docker/vendor.conf147
-rw-r--r--vendor/github.com/docker/go-connections/LICENSE191
-rw-r--r--vendor/github.com/docker/go-connections/README.md13
-rw-r--r--vendor/github.com/docker/go-connections/nat/nat.go242
-rw-r--r--vendor/github.com/docker/go-connections/nat/parse.go57
-rw-r--r--vendor/github.com/docker/go-connections/nat/sort.go96
-rw-r--r--vendor/github.com/docker/go-connections/sockets/README.md0
-rw-r--r--vendor/github.com/docker/go-connections/sockets/inmem_socket.go81
-rw-r--r--vendor/github.com/docker/go-connections/sockets/proxy.go51
-rw-r--r--vendor/github.com/docker/go-connections/sockets/sockets.go38
-rw-r--r--vendor/github.com/docker/go-connections/sockets/sockets_unix.go35
-rw-r--r--vendor/github.com/docker/go-connections/sockets/sockets_windows.go27
-rw-r--r--vendor/github.com/docker/go-connections/sockets/tcp_socket.go22
-rw-r--r--vendor/github.com/docker/go-connections/sockets/unix_socket.go32
-rw-r--r--vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go18
-rw-r--r--vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go14
-rw-r--r--vendor/github.com/docker/go-connections/tlsconfig/config.go244
-rw-r--r--vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go17
-rw-r--r--vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go15
-rw-r--r--vendor/github.com/docker/go-units/LICENSE191
-rw-r--r--vendor/github.com/docker/go-units/README.md16
-rw-r--r--vendor/github.com/docker/go-units/duration.go33
-rw-r--r--vendor/github.com/docker/go-units/size.go96
-rw-r--r--vendor/github.com/docker/go-units/ulimit.go118
-rw-r--r--vendor/github.com/docker/libtrust/LICENSE191
-rw-r--r--vendor/github.com/docker/libtrust/README.md22
-rw-r--r--vendor/github.com/docker/libtrust/certificates.go175
-rw-r--r--vendor/github.com/docker/libtrust/doc.go9
-rw-r--r--vendor/github.com/docker/libtrust/ec_key.go428
-rw-r--r--vendor/github.com/docker/libtrust/filter.go50
-rw-r--r--vendor/github.com/docker/libtrust/hash.go56
-rw-r--r--vendor/github.com/docker/libtrust/jsonsign.go657
-rw-r--r--vendor/github.com/docker/libtrust/key.go253
-rw-r--r--vendor/github.com/docker/libtrust/key_files.go255
-rw-r--r--vendor/github.com/docker/libtrust/key_manager.go175
-rw-r--r--vendor/github.com/docker/libtrust/rsa_key.go427
-rw-r--r--vendor/github.com/docker/libtrust/util.go363
-rw-r--r--vendor/github.com/docker/spdystream/LICENSE191
-rw-r--r--vendor/github.com/docker/spdystream/LICENSE.docs425
-rw-r--r--vendor/github.com/docker/spdystream/README.md77
-rw-r--r--vendor/github.com/docker/spdystream/connection.go959
-rw-r--r--vendor/github.com/docker/spdystream/handlers.go38
-rw-r--r--vendor/github.com/docker/spdystream/priority.go98
-rw-r--r--vendor/github.com/docker/spdystream/spdy/dictionary.go187
-rw-r--r--vendor/github.com/docker/spdystream/spdy/read.go348
-rw-r--r--vendor/github.com/docker/spdystream/spdy/types.go275
-rw-r--r--vendor/github.com/docker/spdystream/spdy/write.go318
-rw-r--r--vendor/github.com/docker/spdystream/stream.go327
-rw-r--r--vendor/github.com/docker/spdystream/utils.go16
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/LICENSE22
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/README.md83
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go64
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/config.go46
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/model_builder.go467
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/model_list.go86
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go81
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go87
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go36
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/swagger.go185
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go21
-rw-r--r--vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go443
-rw-r--r--vendor/github.com/emicklei/go-restful/LICENSE22
-rw-r--r--vendor/github.com/emicklei/go-restful/README.md74
-rw-r--r--vendor/github.com/emicklei/go-restful/compress.go123
-rw-r--r--vendor/github.com/emicklei/go-restful/compressor_cache.go103
-rw-r--r--vendor/github.com/emicklei/go-restful/compressor_pools.go91
-rw-r--r--vendor/github.com/emicklei/go-restful/compressors.go54
-rw-r--r--vendor/github.com/emicklei/go-restful/constants.go30
-rw-r--r--vendor/github.com/emicklei/go-restful/container.go366
-rw-r--r--vendor/github.com/emicklei/go-restful/cors_filter.go202
-rw-r--r--vendor/github.com/emicklei/go-restful/curly.go164
-rw-r--r--vendor/github.com/emicklei/go-restful/curly_route.go52
-rw-r--r--vendor/github.com/emicklei/go-restful/doc.go185
-rw-r--r--vendor/github.com/emicklei/go-restful/entity_accessors.go163
-rw-r--r--vendor/github.com/emicklei/go-restful/filter.go35
-rw-r--r--vendor/github.com/emicklei/go-restful/jsr311.go248
-rw-r--r--vendor/github.com/emicklei/go-restful/log/log.go34
-rw-r--r--vendor/github.com/emicklei/go-restful/logger.go32
-rw-r--r--vendor/github.com/emicklei/go-restful/mime.go45
-rw-r--r--vendor/github.com/emicklei/go-restful/options_filter.go26
-rw-r--r--vendor/github.com/emicklei/go-restful/parameter.go114
-rw-r--r--vendor/github.com/emicklei/go-restful/path_expression.go69
-rw-r--r--vendor/github.com/emicklei/go-restful/request.go113
-rw-r--r--vendor/github.com/emicklei/go-restful/response.go236
-rw-r--r--vendor/github.com/emicklei/go-restful/route.go186
-rw-r--r--vendor/github.com/emicklei/go-restful/route_builder.go293
-rw-r--r--vendor/github.com/emicklei/go-restful/router.go18
-rw-r--r--vendor/github.com/emicklei/go-restful/service_error.go23
-rw-r--r--vendor/github.com/emicklei/go-restful/web_service.go290
-rw-r--r--vendor/github.com/emicklei/go-restful/web_service_container.go39
-rw-r--r--vendor/github.com/exponent-io/jsonpath/LICENSE21
-rw-r--r--vendor/github.com/exponent-io/jsonpath/README.md66
-rw-r--r--vendor/github.com/exponent-io/jsonpath/decoder.go210
-rw-r--r--vendor/github.com/exponent-io/jsonpath/path.go67
-rw-r--r--vendor/github.com/exponent-io/jsonpath/pathaction.go61
-rw-r--r--vendor/github.com/fatih/camelcase/LICENSE.md20
-rw-r--r--vendor/github.com/fatih/camelcase/README.md58
-rw-r--r--vendor/github.com/fatih/camelcase/camelcase.go90
-rw-r--r--vendor/github.com/fsnotify/fsnotify/LICENSE28
-rw-r--r--vendor/github.com/fsnotify/fsnotify/README.md79
-rw-r--r--vendor/github.com/fsnotify/fsnotify/fen.go37
-rw-r--r--vendor/github.com/fsnotify/fsnotify/fsnotify.go66
-rw-r--r--vendor/github.com/fsnotify/fsnotify/inotify.go334
-rw-r--r--vendor/github.com/fsnotify/fsnotify/inotify_poller.go187
-rw-r--r--vendor/github.com/fsnotify/fsnotify/kqueue.go503
-rw-r--r--vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go11
-rw-r--r--vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go12
-rw-r--r--vendor/github.com/fsnotify/fsnotify/windows.go561
-rw-r--r--vendor/github.com/ghodss/yaml/LICENSE50
-rw-r--r--vendor/github.com/ghodss/yaml/README.md120
-rw-r--r--vendor/github.com/ghodss/yaml/fields.go501
-rw-r--r--vendor/github.com/ghodss/yaml/yaml.go277
-rw-r--r--vendor/github.com/go-openapi/analysis/LICENSE202
-rw-r--r--vendor/github.com/go-openapi/analysis/README.md6
-rw-r--r--vendor/github.com/go-openapi/analysis/analyzer.go614
-rw-r--r--vendor/github.com/go-openapi/jsonpointer/LICENSE202
-rw-r--r--vendor/github.com/go-openapi/jsonpointer/README.md15
-rw-r--r--vendor/github.com/go-openapi/jsonpointer/pointer.go390
-rw-r--r--vendor/github.com/go-openapi/jsonreference/LICENSE202
-rw-r--r--vendor/github.com/go-openapi/jsonreference/README.md15
-rw-r--r--vendor/github.com/go-openapi/jsonreference/reference.go156
-rw-r--r--vendor/github.com/go-openapi/loads/LICENSE202
-rw-r--r--vendor/github.com/go-openapi/loads/README.md5
-rw-r--r--vendor/github.com/go-openapi/loads/spec.go203
-rw-r--r--vendor/github.com/go-openapi/spec/LICENSE202
-rw-r--r--vendor/github.com/go-openapi/spec/README.md5
-rw-r--r--vendor/github.com/go-openapi/spec/bindata.go274
-rw-r--r--vendor/github.com/go-openapi/spec/contact_info.go24
-rw-r--r--vendor/github.com/go-openapi/spec/expander.go626
-rw-r--r--vendor/github.com/go-openapi/spec/external_docs.go24
-rw-r--r--vendor/github.com/go-openapi/spec/header.go165
-rw-r--r--vendor/github.com/go-openapi/spec/info.go168
-rw-r--r--vendor/github.com/go-openapi/spec/items.go199
-rw-r--r--vendor/github.com/go-openapi/spec/license.go23
-rw-r--r--vendor/github.com/go-openapi/spec/operation.go233
-rw-r--r--vendor/github.com/go-openapi/spec/parameter.go299
-rw-r--r--vendor/github.com/go-openapi/spec/path_item.go90
-rw-r--r--vendor/github.com/go-openapi/spec/paths.go97
-rw-r--r--vendor/github.com/go-openapi/spec/ref.go167
-rw-r--r--vendor/github.com/go-openapi/spec/response.go113
-rw-r--r--vendor/github.com/go-openapi/spec/responses.go122
-rw-r--r--vendor/github.com/go-openapi/spec/schema.go628
-rw-r--r--vendor/github.com/go-openapi/spec/security_scheme.go142
-rw-r--r--vendor/github.com/go-openapi/spec/spec.go79
-rw-r--r--vendor/github.com/go-openapi/spec/swagger.go317
-rw-r--r--vendor/github.com/go-openapi/spec/tag.go73
-rw-r--r--vendor/github.com/go-openapi/spec/xml_object.go68
-rw-r--r--vendor/github.com/go-openapi/swag/LICENSE202
-rw-r--r--vendor/github.com/go-openapi/swag/README.md12
-rw-r--r--vendor/github.com/go-openapi/swag/convert.go188
-rw-r--r--vendor/github.com/go-openapi/swag/convert_types.go595
-rw-r--r--vendor/github.com/go-openapi/swag/json.go270
-rw-r--r--vendor/github.com/go-openapi/swag/loading.go49
-rw-r--r--vendor/github.com/go-openapi/swag/net.go24
-rw-r--r--vendor/github.com/go-openapi/swag/path.go56
-rw-r--r--vendor/github.com/go-openapi/swag/util.go318
-rw-r--r--vendor/github.com/go-zoo/bone/LICENSE22
-rw-r--r--vendor/github.com/go-zoo/bone/README.md81
-rw-r--r--vendor/github.com/go-zoo/bone/bone.go74
-rw-r--r--vendor/github.com/go-zoo/bone/helper.go169
-rw-r--r--vendor/github.com/go-zoo/bone/helper_15.go45
-rw-r--r--vendor/github.com/go-zoo/bone/helper_17.go39
-rw-r--r--vendor/github.com/go-zoo/bone/mux.go137
-rw-r--r--vendor/github.com/go-zoo/bone/route.go245
-rw-r--r--vendor/github.com/godbus/dbus/LICENSE25
-rw-r--r--vendor/github.com/godbus/dbus/README.markdown44
-rw-r--r--vendor/github.com/godbus/dbus/auth.go253
-rw-r--r--vendor/github.com/godbus/dbus/auth_external.go26
-rw-r--r--vendor/github.com/godbus/dbus/auth_sha1.go102
-rw-r--r--vendor/github.com/godbus/dbus/call.go36
-rw-r--r--vendor/github.com/godbus/dbus/conn.go683
-rw-r--r--vendor/github.com/godbus/dbus/conn_darwin.go33
-rw-r--r--vendor/github.com/godbus/dbus/conn_other.go42
-rw-r--r--vendor/github.com/godbus/dbus/dbus.go427
-rw-r--r--vendor/github.com/godbus/dbus/decoder.go228
-rw-r--r--vendor/github.com/godbus/dbus/default_handler.go291
-rw-r--r--vendor/github.com/godbus/dbus/doc.go69
-rw-r--r--vendor/github.com/godbus/dbus/encoder.go210
-rw-r--r--vendor/github.com/godbus/dbus/export.go413
-rw-r--r--vendor/github.com/godbus/dbus/homedir.go28
-rw-r--r--vendor/github.com/godbus/dbus/homedir_dynamic.go15
-rw-r--r--vendor/github.com/godbus/dbus/homedir_static.go45
-rw-r--r--vendor/github.com/godbus/dbus/message.go353
-rw-r--r--vendor/github.com/godbus/dbus/object.go147
-rw-r--r--vendor/github.com/godbus/dbus/server_interfaces.go89
-rw-r--r--vendor/github.com/godbus/dbus/sig.go259
-rw-r--r--vendor/github.com/godbus/dbus/transport_darwin.go6
-rw-r--r--vendor/github.com/godbus/dbus/transport_generic.go50
-rw-r--r--vendor/github.com/godbus/dbus/transport_tcp.go43
-rw-r--r--vendor/github.com/godbus/dbus/transport_unix.go196
-rw-r--r--vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go95
-rw-r--r--vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go91
-rw-r--r--vendor/github.com/godbus/dbus/transport_unixcred_linux.go25
-rw-r--r--vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go14
-rw-r--r--vendor/github.com/godbus/dbus/variant.go144
-rw-r--r--vendor/github.com/godbus/dbus/variant_lexer.go284
-rw-r--r--vendor/github.com/godbus/dbus/variant_parser.go817
-rw-r--r--vendor/github.com/gogo/protobuf/LICENSE36
-rw-r--r--vendor/github.com/gogo/protobuf/README214
-rw-r--r--vendor/github.com/gogo/protobuf/Readme.md119
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/doc.go168
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go665
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/gogo.proto122
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/helper.go310
-rw-r--r--vendor/github.com/gogo/protobuf/proto/clone.go234
-rw-r--r--vendor/github.com/gogo/protobuf/proto/decode.go882
-rw-r--r--vendor/github.com/gogo/protobuf/proto/decode_gogo.go171
-rw-r--r--vendor/github.com/gogo/protobuf/proto/encode.go1363
-rw-r--r--vendor/github.com/gogo/protobuf/proto/encode_gogo.go354
-rw-r--r--vendor/github.com/gogo/protobuf/proto/equal.go296
-rw-r--r--vendor/github.com/gogo/protobuf/proto/extensions.go689
-rw-r--r--vendor/github.com/gogo/protobuf/proto/extensions_gogo.go294
-rw-r--r--vendor/github.com/gogo/protobuf/proto/lib.go898
-rw-r--r--vendor/github.com/gogo/protobuf/proto/lib_gogo.go42
-rw-r--r--vendor/github.com/gogo/protobuf/proto/message_set.go311
-rw-r--r--vendor/github.com/gogo/protobuf/proto/pointer_reflect.go484
-rw-r--r--vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go270
-rw-r--r--vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go107
-rw-r--r--vendor/github.com/gogo/protobuf/proto/properties.go940
-rw-r--r--vendor/github.com/gogo/protobuf/proto/properties_gogo.go66
-rw-r--r--vendor/github.com/gogo/protobuf/proto/skip_gogo.go119
-rw-r--r--vendor/github.com/gogo/protobuf/proto/text.go815
-rw-r--r--vendor/github.com/gogo/protobuf/proto/text_gogo.go57
-rw-r--r--vendor/github.com/gogo/protobuf/proto/text_parser.go858
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto150
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto779
-rw-r--r--vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go1981
-rw-r--r--vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go658
-rw-r--r--vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go357
-rw-r--r--vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go101
-rw-r--r--vendor/github.com/golang/glog/LICENSE191
-rw-r--r--vendor/github.com/golang/glog/README44
-rw-r--r--vendor/github.com/golang/glog/glog.go1180
-rw-r--r--vendor/github.com/golang/glog/glog_file.go124
-rw-r--r--vendor/github.com/golang/groupcache/LICENSE191
-rw-r--r--vendor/github.com/golang/groupcache/README.md73
-rw-r--r--vendor/github.com/golang/groupcache/lru/lru.go133
-rw-r--r--vendor/github.com/golang/protobuf/LICENSE31
-rw-r--r--vendor/github.com/golang/protobuf/README.md242
-rw-r--r--vendor/github.com/golang/protobuf/proto/clone.go229
-rw-r--r--vendor/github.com/golang/protobuf/proto/decode.go970
-rw-r--r--vendor/github.com/golang/protobuf/proto/encode.go1362
-rw-r--r--vendor/github.com/golang/protobuf/proto/equal.go300
-rw-r--r--vendor/github.com/golang/protobuf/proto/extensions.go587
-rw-r--r--vendor/github.com/golang/protobuf/proto/lib.go897
-rw-r--r--vendor/github.com/golang/protobuf/proto/message_set.go311
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_reflect.go484
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_unsafe.go270
-rw-r--r--vendor/github.com/golang/protobuf/proto/properties.go872
-rw-r--r--vendor/github.com/golang/protobuf/proto/text.go854
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_parser.go895
-rw-r--r--vendor/github.com/google/gofuzz/LICENSE202
-rw-r--r--vendor/github.com/google/gofuzz/README.md71
-rw-r--r--vendor/github.com/google/gofuzz/doc.go18
-rw-r--r--vendor/github.com/google/gofuzz/fuzz.go453
-rw-r--r--vendor/github.com/gorilla/context/LICENSE27
-rw-r--r--vendor/github.com/gorilla/context/README.md7
-rw-r--r--vendor/github.com/gorilla/context/context.go143
-rw-r--r--vendor/github.com/gorilla/context/doc.go82
-rw-r--r--vendor/github.com/gorilla/mux/LICENSE27
-rw-r--r--vendor/github.com/gorilla/mux/README.md339
-rw-r--r--vendor/github.com/gorilla/mux/context_gorilla.go26
-rw-r--r--vendor/github.com/gorilla/mux/context_native.go24
-rw-r--r--vendor/github.com/gorilla/mux/doc.go240
-rw-r--r--vendor/github.com/gorilla/mux/mux.go542
-rw-r--r--vendor/github.com/gorilla/mux/regexp.go323
-rw-r--r--vendor/github.com/gorilla/mux/route.go636
-rw-r--r--vendor/github.com/hashicorp/errwrap/LICENSE353
-rw-r--r--vendor/github.com/hashicorp/errwrap/README.md89
-rw-r--r--vendor/github.com/hashicorp/errwrap/errwrap.go169
-rw-r--r--vendor/github.com/hashicorp/go-multierror/LICENSE353
-rw-r--r--vendor/github.com/hashicorp/go-multierror/README.md97
-rw-r--r--vendor/github.com/hashicorp/go-multierror/append.go41
-rw-r--r--vendor/github.com/hashicorp/go-multierror/flatten.go26
-rw-r--r--vendor/github.com/hashicorp/go-multierror/format.go27
-rw-r--r--vendor/github.com/hashicorp/go-multierror/multierror.go51
-rw-r--r--vendor/github.com/hashicorp/go-multierror/prefix.go37
-rw-r--r--vendor/github.com/hashicorp/golang-lru/2q.go212
-rw-r--r--vendor/github.com/hashicorp/golang-lru/LICENSE362
-rw-r--r--vendor/github.com/hashicorp/golang-lru/README.md25
-rw-r--r--vendor/github.com/hashicorp/golang-lru/arc.go257
-rw-r--r--vendor/github.com/hashicorp/golang-lru/lru.go114
-rw-r--r--vendor/github.com/hashicorp/golang-lru/simplelru/lru.go160
-rw-r--r--vendor/github.com/hpcloud/tail/LICENSE.txt21
-rw-r--r--vendor/github.com/hpcloud/tail/README.md28
-rw-r--r--vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go97
-rw-r--r--vendor/github.com/hpcloud/tail/ratelimiter/memory.go58
-rw-r--r--vendor/github.com/hpcloud/tail/ratelimiter/storage.go6
-rw-r--r--vendor/github.com/hpcloud/tail/tail.go438
-rw-r--r--vendor/github.com/hpcloud/tail/tail_posix.go11
-rw-r--r--vendor/github.com/hpcloud/tail/tail_windows.go12
-rw-r--r--vendor/github.com/hpcloud/tail/util/util.go48
-rw-r--r--vendor/github.com/hpcloud/tail/watch/filechanges.go36
-rw-r--r--vendor/github.com/hpcloud/tail/watch/inotify.go128
-rw-r--r--vendor/github.com/hpcloud/tail/watch/inotify_tracker.go260
-rw-r--r--vendor/github.com/hpcloud/tail/watch/polling.go118
-rw-r--r--vendor/github.com/hpcloud/tail/watch/watch.go20
-rw-r--r--vendor/github.com/hpcloud/tail/winfile/winfile.go92
-rw-r--r--vendor/github.com/imdario/mergo/LICENSE28
-rw-r--r--vendor/github.com/imdario/mergo/README.md128
-rw-r--r--vendor/github.com/imdario/mergo/doc.go44
-rw-r--r--vendor/github.com/imdario/mergo/map.go156
-rw-r--r--vendor/github.com/imdario/mergo/merge.go123
-rw-r--r--vendor/github.com/imdario/mergo/mergo.go90
-rw-r--r--vendor/github.com/juju/ratelimit/LICENSE191
-rw-r--r--vendor/github.com/juju/ratelimit/README.md117
-rw-r--r--vendor/github.com/juju/ratelimit/ratelimit.go284
-rw-r--r--vendor/github.com/juju/ratelimit/reader.go51
-rw-r--r--vendor/github.com/kr/pty/License23
-rw-r--r--vendor/github.com/kr/pty/README.md36
-rw-r--r--vendor/github.com/kr/pty/doc.go16
-rw-r--r--vendor/github.com/kr/pty/ioctl.go13
-rw-r--r--vendor/github.com/kr/pty/ioctl_bsd.go39
-rw-r--r--vendor/github.com/kr/pty/pty_darwin.go60
-rw-r--r--vendor/github.com/kr/pty/pty_dragonfly.go76
-rw-r--r--vendor/github.com/kr/pty/pty_freebsd.go73
-rw-r--r--vendor/github.com/kr/pty/pty_linux.go46
-rw-r--r--vendor/github.com/kr/pty/pty_unsupported.go11
-rw-r--r--vendor/github.com/kr/pty/run.go34
-rw-r--r--vendor/github.com/kr/pty/util.go37
-rw-r--r--vendor/github.com/kr/pty/ztypes_386.go9
-rw-r--r--vendor/github.com/kr/pty/ztypes_amd64.go9
-rw-r--r--vendor/github.com/kr/pty/ztypes_arm.go9
-rw-r--r--vendor/github.com/kr/pty/ztypes_arm64.go11
-rw-r--r--vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go14
-rw-r--r--vendor/github.com/kr/pty/ztypes_freebsd_386.go13
-rw-r--r--vendor/github.com/kr/pty/ztypes_freebsd_amd64.go14
-rw-r--r--vendor/github.com/kr/pty/ztypes_freebsd_arm.go13
-rw-r--r--vendor/github.com/kr/pty/ztypes_mipsx.go12
-rw-r--r--vendor/github.com/kr/pty/ztypes_ppc64.go11
-rw-r--r--vendor/github.com/kr/pty/ztypes_ppc64le.go11
-rw-r--r--vendor/github.com/kr/pty/ztypes_s390x.go11
-rw-r--r--vendor/github.com/mailru/easyjson/LICENSE7
-rw-r--r--vendor/github.com/mailru/easyjson/README.md193
-rw-r--r--vendor/github.com/mailru/easyjson/buffer/pool.go207
-rw-r--r--vendor/github.com/mailru/easyjson/jlexer/error.go15
-rw-r--r--vendor/github.com/mailru/easyjson/jlexer/lexer.go1121
-rw-r--r--vendor/github.com/mailru/easyjson/jwriter/writer.go302
-rw-r--r--vendor/github.com/mattn/go-runewidth/README.mkd26
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth.go464
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_js.go8
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_posix.go69
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_windows.go24
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE201
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/README.md20
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go75
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go16
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go46
-rw-r--r--vendor/github.com/mistifyio/go-zfs/LICENSE201
-rw-r--r--vendor/github.com/mistifyio/go-zfs/README.md54
-rw-r--r--vendor/github.com/mistifyio/go-zfs/error.go18
-rw-r--r--vendor/github.com/mistifyio/go-zfs/utils.go320
-rw-r--r--vendor/github.com/mistifyio/go-zfs/zfs.go382
-rw-r--r--vendor/github.com/mistifyio/go-zfs/zpool.go105
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/LICENSE.md21
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/README.md39
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/wordwrap.go73
-rw-r--r--vendor/github.com/mrunalp/fileutils/LICENSE191
-rw-r--r--vendor/github.com/mrunalp/fileutils/README.md5
-rw-r--r--vendor/github.com/mrunalp/fileutils/fileutils.go161
-rw-r--r--vendor/github.com/mrunalp/fileutils/idtools.go49
-rw-r--r--vendor/github.com/mtrmac/gpgme/LICENSE12
-rw-r--r--vendor/github.com/mtrmac/gpgme/README.md13
-rw-r--r--vendor/github.com/mtrmac/gpgme/callbacks.go42
-rw-r--r--vendor/github.com/mtrmac/gpgme/data.go191
-rw-r--r--vendor/github.com/mtrmac/gpgme/go_gpgme.c89
-rw-r--r--vendor/github.com/mtrmac/gpgme/go_gpgme.h37
-rw-r--r--vendor/github.com/mtrmac/gpgme/gpgme.go748
-rw-r--r--vendor/github.com/opencontainers/go-digest/LICENSE.code191
-rw-r--r--vendor/github.com/opencontainers/go-digest/LICENSE.docs425
-rw-r--r--vendor/github.com/opencontainers/go-digest/README.md104
-rw-r--r--vendor/github.com/opencontainers/go-digest/algorithm.go158
-rw-r--r--vendor/github.com/opencontainers/go-digest/digest.go154
-rw-r--r--vendor/github.com/opencontainers/go-digest/digester.go39
-rw-r--r--vendor/github.com/opencontainers/go-digest/doc.go56
-rw-r--r--vendor/github.com/opencontainers/go-digest/verifiers.go45
-rw-r--r--vendor/github.com/opencontainers/image-spec/LICENSE191
-rw-r--r--vendor/github.com/opencontainers/image-spec/README.md167
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go56
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go103
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go64
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go29
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go28
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go32
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go48
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/version.go32
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/versioned.go23
-rw-r--r--vendor/github.com/opencontainers/runc/LICENSE191
-rw-r--r--vendor/github.com/opencontainers/runc/NOTICE17
-rw-r--r--vendor/github.com/opencontainers/runc/README.md244
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/README.md328
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go39
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go20
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go114
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go64
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go3
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go360
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go237
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go125
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go121
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go163
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go80
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go61
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go3
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go71
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go313
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go40
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go43
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go41
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go35
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go73
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go78
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go128
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go108
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go55
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go553
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go462
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go10
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go61
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go122
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go6
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go6
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/config.go344
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go61
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/device.go57
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go111
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go9
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go14
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go39
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go5
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go122
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go31
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go13
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go8
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/network.go72
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go117
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go195
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/console.go17
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go13
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/console_linux.go152
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go11
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/console_windows.go30
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/container.go166
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/container_linux.go1654
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go20
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/container_windows.go20
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go37
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go6
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go1178
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto209
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go100
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go3
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/devices/number.go24
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/error.go70
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/factory.go44
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go325
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/generic_error.go92
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/init_linux.go502
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go50
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/message_linux.go87
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/network_linux.go259
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go90
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md44
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h32
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go12
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go25
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go5
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c862
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/process.go106
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/process_linux.go493
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/restored_process.go122
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go812
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go76
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go227
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go24
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go11
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go65
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go27
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go38
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go5
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go188
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/state_linux.go248
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stats.go15
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go5
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go8
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go7
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go5
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/sync.go107
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/linux.go136
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/proc.go113
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go25
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go25
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go25
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go12
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go15
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go9
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go35
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go95
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go46
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go38
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/user/user.go441
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go95
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go127
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go44
-rw-r--r--vendor/github.com/opencontainers/runc/vendor.conf21
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/LICENSE191
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/README.md158
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/specs-go/config.go570
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/specs-go/state.go17
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/specs-go/version.go18
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/LICENSE191
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/README.md84
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/error/error.go92
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/generate.go1256
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go12
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go135
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go55
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go73
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go52
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go577
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go140
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/spec.go109
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/specerror/error.go170
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/validate/validate.go1050
-rw-r--r--vendor/github.com/opencontainers/selinux/LICENSE201
-rw-r--r--vendor/github.com/opencontainers/selinux/README.md7
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/label/label.go84
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go206
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/selinux.go593
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go78
-rw-r--r--vendor/github.com/ostreedev/ostree-go/LICENSE17
-rw-r--r--vendor/github.com/ostreedev/ostree-go/README.md4
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go60
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go47
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go71
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go52
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go53
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go50
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go50
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go27
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h17
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go79
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go51
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go97
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go93
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h191
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go102
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go482
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go90
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go167
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go217
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go1
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go0
-rw-r--r--vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go0
-rw-r--r--vendor/github.com/pkg/errors/LICENSE23
-rw-r--r--vendor/github.com/pkg/errors/README.md52
-rw-r--r--vendor/github.com/pkg/errors/errors.go269
-rw-r--r--vendor/github.com/pkg/errors/stack.go178
-rw-r--r--vendor/github.com/pquerna/ffjson/LICENSE202
-rw-r--r--vendor/github.com/pquerna/ffjson/NOTICE8
-rw-r--r--vendor/github.com/pquerna/ffjson/README.md232
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go421
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go11
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go105
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go88
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go378
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go668
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/fold.go121
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go542
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go936
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go213
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go668
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go475
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/iota.go161
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go512
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go937
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/reader.go512
-rw-r--r--vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go34
-rw-r--r--vendor/github.com/prometheus/client_golang/LICENSE201
-rw-r--r--vendor/github.com/prometheus/client_golang/NOTICE23
-rw-r--r--vendor/github.com/prometheus/client_golang/README.md47
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/README.md1
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/collector.go75
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/counter.go164
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/desc.go200
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/doc.go186
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go119
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/fnv.go29
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/gauge.go145
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector.go276
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/histogram.go444
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/http.go524
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/metric.go166
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/observer.go50
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector.go140
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/registry.go755
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/summary.go543
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/timer.go48
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/untyped.go143
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/value.go239
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/vec.go404
-rw-r--r--vendor/github.com/prometheus/client_model/LICENSE201
-rw-r--r--vendor/github.com/prometheus/client_model/NOTICE5
-rw-r--r--vendor/github.com/prometheus/client_model/README.md26
-rw-r--r--vendor/github.com/prometheus/client_model/go/metrics.pb.go364
-rw-r--r--vendor/github.com/prometheus/common/LICENSE201
-rw-r--r--vendor/github.com/prometheus/common/NOTICE5
-rw-r--r--vendor/github.com/prometheus/common/README.md12
-rw-r--r--vendor/github.com/prometheus/common/expfmt/decode.go429
-rw-r--r--vendor/github.com/prometheus/common/expfmt/encode.go88
-rw-r--r--vendor/github.com/prometheus/common/expfmt/expfmt.go38
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz.go36
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_create.go303
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_parse.go753
-rw-r--r--vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt67
-rw-r--r--vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go162
-rw-r--r--vendor/github.com/prometheus/common/model/alert.go136
-rw-r--r--vendor/github.com/prometheus/common/model/fingerprinting.go105
-rw-r--r--vendor/github.com/prometheus/common/model/fnv.go42
-rw-r--r--vendor/github.com/prometheus/common/model/labels.go210
-rw-r--r--vendor/github.com/prometheus/common/model/labelset.go169
-rw-r--r--vendor/github.com/prometheus/common/model/metric.go103
-rw-r--r--vendor/github.com/prometheus/common/model/model.go16
-rw-r--r--vendor/github.com/prometheus/common/model/signature.go144
-rw-r--r--vendor/github.com/prometheus/common/model/silence.go106
-rw-r--r--vendor/github.com/prometheus/common/model/time.go249
-rw-r--r--vendor/github.com/prometheus/common/model/value.go416
-rw-r--r--vendor/github.com/prometheus/procfs/LICENSE201
-rw-r--r--vendor/github.com/prometheus/procfs/NOTICE7
-rw-r--r--vendor/github.com/prometheus/procfs/README.md11
-rw-r--r--vendor/github.com/prometheus/procfs/buddyinfo.go95
-rw-r--r--vendor/github.com/prometheus/procfs/doc.go45
-rw-r--r--vendor/github.com/prometheus/procfs/fs.go46
-rw-r--r--vendor/github.com/prometheus/procfs/ipvs.go246
-rw-r--r--vendor/github.com/prometheus/procfs/mdstat.go138
-rw-r--r--vendor/github.com/prometheus/procfs/mountstats.go556
-rw-r--r--vendor/github.com/prometheus/procfs/proc.go224
-rw-r--r--vendor/github.com/prometheus/procfs/proc_io.go55
-rw-r--r--vendor/github.com/prometheus/procfs/proc_limits.go137
-rw-r--r--vendor/github.com/prometheus/procfs/proc_stat.go175
-rw-r--r--vendor/github.com/prometheus/procfs/stat.go56
-rw-r--r--vendor/github.com/prometheus/procfs/xfrm.go187
-rw-r--r--vendor/github.com/prometheus/procfs/xfs/parse.go359
-rw-r--r--vendor/github.com/prometheus/procfs/xfs/xfs.go163
-rw-r--r--vendor/github.com/renstrom/dedent/LICENSE21
-rw-r--r--vendor/github.com/renstrom/dedent/README.md50
-rw-r--r--vendor/github.com/renstrom/dedent/dedent.go56
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/LICENSE22
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/README51
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp.go902
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go514
-rw-r--r--vendor/github.com/sirupsen/logrus/LICENSE21
-rw-r--r--vendor/github.com/sirupsen/logrus/README.md501
-rw-r--r--vendor/github.com/sirupsen/logrus/alt_exit.go64
-rw-r--r--vendor/github.com/sirupsen/logrus/doc.go26
-rw-r--r--vendor/github.com/sirupsen/logrus/entry.go275
-rw-r--r--vendor/github.com/sirupsen/logrus/exported.go193
-rw-r--r--vendor/github.com/sirupsen/logrus/formatter.go45
-rw-r--r--vendor/github.com/sirupsen/logrus/hooks.go34
-rw-r--r--vendor/github.com/sirupsen/logrus/json_formatter.go74
-rw-r--r--vendor/github.com/sirupsen/logrus/logger.go317
-rw-r--r--vendor/github.com/sirupsen/logrus/logrus.go143
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_appengine.go10
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_bsd.go10
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_linux.go14
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_notwindows.go28
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_solaris.go21
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_windows.go82
-rw-r--r--vendor/github.com/sirupsen/logrus/text_formatter.go189
-rw-r--r--vendor/github.com/sirupsen/logrus/writer.go62
-rw-r--r--vendor/github.com/soheilhy/cmux/LICENSE202
-rw-r--r--vendor/github.com/soheilhy/cmux/README.md83
-rw-r--r--vendor/github.com/soheilhy/cmux/buffer.go67
-rw-r--r--vendor/github.com/soheilhy/cmux/cmux.go269
-rw-r--r--vendor/github.com/soheilhy/cmux/doc.go18
-rw-r--r--vendor/github.com/soheilhy/cmux/matchers.go262
-rw-r--r--vendor/github.com/soheilhy/cmux/patricia.go179
-rw-r--r--vendor/github.com/spf13/pflag/LICENSE28
-rw-r--r--vendor/github.com/spf13/pflag/README.md277
-rw-r--r--vendor/github.com/spf13/pflag/bool.go94
-rw-r--r--vendor/github.com/spf13/pflag/bool_slice.go147
-rw-r--r--vendor/github.com/spf13/pflag/count.go94
-rw-r--r--vendor/github.com/spf13/pflag/duration.go86
-rw-r--r--vendor/github.com/spf13/pflag/flag.go1063
-rw-r--r--vendor/github.com/spf13/pflag/float32.go88
-rw-r--r--vendor/github.com/spf13/pflag/float64.go84
-rw-r--r--vendor/github.com/spf13/pflag/golangflag.go101
-rw-r--r--vendor/github.com/spf13/pflag/int.go84
-rw-r--r--vendor/github.com/spf13/pflag/int32.go88
-rw-r--r--vendor/github.com/spf13/pflag/int64.go84
-rw-r--r--vendor/github.com/spf13/pflag/int8.go88
-rw-r--r--vendor/github.com/spf13/pflag/int_slice.go128
-rw-r--r--vendor/github.com/spf13/pflag/ip.go94
-rw-r--r--vendor/github.com/spf13/pflag/ip_slice.go148
-rw-r--r--vendor/github.com/spf13/pflag/ipmask.go122
-rw-r--r--vendor/github.com/spf13/pflag/ipnet.go98
-rw-r--r--vendor/github.com/spf13/pflag/string.go80
-rw-r--r--vendor/github.com/spf13/pflag/string_array.go103
-rw-r--r--vendor/github.com/spf13/pflag/string_slice.go129
-rw-r--r--vendor/github.com/spf13/pflag/uint.go88
-rw-r--r--vendor/github.com/spf13/pflag/uint16.go88
-rw-r--r--vendor/github.com/spf13/pflag/uint32.go88
-rw-r--r--vendor/github.com/spf13/pflag/uint64.go88
-rw-r--r--vendor/github.com/spf13/pflag/uint8.go88
-rw-r--r--vendor/github.com/spf13/pflag/uint_slice.go126
-rw-r--r--vendor/github.com/syndtr/gocapability/LICENSE24
-rw-r--r--vendor/github.com/syndtr/gocapability/capability/capability.go72
-rw-r--r--vendor/github.com/syndtr/gocapability/capability/capability_linux.go650
-rw-r--r--vendor/github.com/syndtr/gocapability/capability/capability_noop.go19
-rw-r--r--vendor/github.com/syndtr/gocapability/capability/enum.go268
-rw-r--r--vendor/github.com/syndtr/gocapability/capability/enum_gen.go129
-rw-r--r--vendor/github.com/syndtr/gocapability/capability/syscall_linux.go154
-rw-r--r--vendor/github.com/tchap/go-patricia/LICENSE20
-rw-r--r--vendor/github.com/tchap/go-patricia/README.md123
-rw-r--r--vendor/github.com/tchap/go-patricia/patricia/children.go325
-rw-r--r--vendor/github.com/tchap/go-patricia/patricia/patricia.go594
-rw-r--r--vendor/github.com/ugorji/go/LICENSE22
-rw-r--r--vendor/github.com/ugorji/go/README.md20
-rw-r--r--vendor/github.com/ugorji/go/codec/0doc.go199
-rw-r--r--vendor/github.com/ugorji/go/codec/README.md148
-rw-r--r--vendor/github.com/ugorji/go/codec/binc.go929
-rw-r--r--vendor/github.com/ugorji/go/codec/cbor.go592
-rw-r--r--vendor/github.com/ugorji/go/codec/decode.go2066
-rw-r--r--vendor/github.com/ugorji/go/codec/decode_go.go16
-rw-r--r--vendor/github.com/ugorji/go/codec/decode_go14.go14
-rw-r--r--vendor/github.com/ugorji/go/codec/encode.go1461
-rw-r--r--vendor/github.com/ugorji/go/codec/fast-path.generated.go39352
-rw-r--r--vendor/github.com/ugorji/go/codec/fast-path.not.go34
-rw-r--r--vendor/github.com/ugorji/go/codec/gen-helper.generated.go243
-rw-r--r--vendor/github.com/ugorji/go/codec/gen.generated.go175
-rw-r--r--vendor/github.com/ugorji/go/codec/gen.go2014
-rw-r--r--vendor/github.com/ugorji/go/codec/gen_15.go12
-rw-r--r--vendor/github.com/ugorji/go/codec/gen_16.go12
-rw-r--r--vendor/github.com/ugorji/go/codec/gen_17.go10
-rw-r--r--vendor/github.com/ugorji/go/codec/helper.go1314
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_internal.go242
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_not_unsafe.go20
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_unsafe.go49
-rw-r--r--vendor/github.com/ugorji/go/codec/json.go1234
-rw-r--r--vendor/github.com/ugorji/go/codec/msgpack.go852
-rw-r--r--vendor/github.com/ugorji/go/codec/noop.go213
-rw-r--r--vendor/github.com/ugorji/go/codec/prebuild.go3
-rw-r--r--vendor/github.com/ugorji/go/codec/rpc.go180
-rw-r--r--vendor/github.com/ugorji/go/codec/simple.go526
-rw-r--r--vendor/github.com/ugorji/go/codec/time.go233
-rw-r--r--vendor/github.com/ulule/deepcopier/LICENSE22
-rw-r--r--vendor/github.com/ulule/deepcopier/README.md129
-rw-r--r--vendor/github.com/ulule/deepcopier/deepcopier.go350
-rw-r--r--vendor/github.com/urfave/cli/LICENSE21
-rw-r--r--vendor/github.com/urfave/cli/README.md1381
-rw-r--r--vendor/github.com/urfave/cli/app.go497
-rw-r--r--vendor/github.com/urfave/cli/category.go44
-rw-r--r--vendor/github.com/urfave/cli/cli.go22
-rw-r--r--vendor/github.com/urfave/cli/command.go304
-rw-r--r--vendor/github.com/urfave/cli/context.go278
-rw-r--r--vendor/github.com/urfave/cli/errors.go115
-rw-r--r--vendor/github.com/urfave/cli/flag.go799
-rw-r--r--vendor/github.com/urfave/cli/flag_generated.go627
-rw-r--r--vendor/github.com/urfave/cli/funcs.go28
-rw-r--r--vendor/github.com/urfave/cli/help.go338
-rw-r--r--vendor/github.com/vbatts/tar-split/LICENSE28
-rw-r--r--vendor/github.com/vbatts/tar-split/README.md137
-rw-r--r--vendor/github.com/vbatts/tar-split/archive/tar/common.go340
-rw-r--r--vendor/github.com/vbatts/tar-split/archive/tar/reader.go1064
-rw-r--r--vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go20
-rw-r--r--vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go20
-rw-r--r--vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go32
-rw-r--r--vendor/github.com/vbatts/tar-split/archive/tar/writer.go416
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/asm/README.md44
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/asm/assemble.go130
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go141
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/asm/doc.go9
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/storage/doc.go12
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/storage/entry.go78
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/storage/getter.go104
-rw-r--r--vendor/github.com/vbatts/tar-split/tar/storage/packer.go127
-rw-r--r--vendor/github.com/vishvananda/netlink/LICENSE192
-rw-r--r--vendor/github.com/vishvananda/netlink/README.md91
-rw-r--r--vendor/github.com/vishvananda/netlink/addr.go56
-rw-r--r--vendor/github.com/vishvananda/netlink/addr_linux.go288
-rw-r--r--vendor/github.com/vishvananda/netlink/bpf_linux.go62
-rw-r--r--vendor/github.com/vishvananda/netlink/bridge_linux.go115
-rw-r--r--vendor/github.com/vishvananda/netlink/class.go78
-rw-r--r--vendor/github.com/vishvananda/netlink/class_linux.go254
-rw-r--r--vendor/github.com/vishvananda/netlink/conntrack_linux.go371
-rw-r--r--vendor/github.com/vishvananda/netlink/conntrack_unspecified.go53
-rw-r--r--vendor/github.com/vishvananda/netlink/filter.go283
-rw-r--r--vendor/github.com/vishvananda/netlink/filter_linux.go591
-rw-r--r--vendor/github.com/vishvananda/netlink/genetlink_linux.go167
-rw-r--r--vendor/github.com/vishvananda/netlink/genetlink_unspecified.go25
-rw-r--r--vendor/github.com/vishvananda/netlink/gtp_linux.go238
-rw-r--r--vendor/github.com/vishvananda/netlink/handle_linux.go111
-rw-r--r--vendor/github.com/vishvananda/netlink/handle_unspecified.go218
-rw-r--r--vendor/github.com/vishvananda/netlink/link.go776
-rw-r--r--vendor/github.com/vishvananda/netlink/link_linux.go1819
-rw-r--r--vendor/github.com/vishvananda/netlink/link_tuntap_linux.go14
-rw-r--r--vendor/github.com/vishvananda/netlink/neigh.go22
-rw-r--r--vendor/github.com/vishvananda/netlink/neigh_linux.go250
-rw-r--r--vendor/github.com/vishvananda/netlink/netlink.go39
-rw-r--r--vendor/github.com/vishvananda/netlink/netlink_linux.go11
-rw-r--r--vendor/github.com/vishvananda/netlink/netlink_unspecified.go221
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/addr_linux.go76
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/bridge_linux.go74
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go189
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go89
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/link_linux.go525
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/mpls_linux.go36
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/nl_linux.go718
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go11
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/route_linux.go80
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/syscall.go68
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/tc_linux.go675
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go296
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go32
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go119
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go334
-rw-r--r--vendor/github.com/vishvananda/netlink/order.go32
-rw-r--r--vendor/github.com/vishvananda/netlink/protinfo.go58
-rw-r--r--vendor/github.com/vishvananda/netlink/protinfo_linux.go74
-rw-r--r--vendor/github.com/vishvananda/netlink/qdisc.go232
-rw-r--r--vendor/github.com/vishvananda/netlink/qdisc_linux.go527
-rw-r--r--vendor/github.com/vishvananda/netlink/route.go116
-rw-r--r--vendor/github.com/vishvananda/netlink/route_linux.go674
-rw-r--r--vendor/github.com/vishvananda/netlink/route_unspecified.go11
-rw-r--r--vendor/github.com/vishvananda/netlink/rule.go40
-rw-r--r--vendor/github.com/vishvananda/netlink/rule_linux.go221
-rw-r--r--vendor/github.com/vishvananda/netlink/socket.go27
-rw-r--r--vendor/github.com/vishvananda/netlink/socket_linux.go159
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm.go74
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go98
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_policy.go74
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go257
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_state.go108
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_state_linux.go444
-rw-r--r--vendor/github.com/vishvananda/netns/LICENSE192
-rw-r--r--vendor/github.com/vishvananda/netns/README.md51
-rw-r--r--vendor/github.com/vishvananda/netns/netns.go80
-rw-r--r--vendor/github.com/vishvananda/netns/netns_linux.go224
-rw-r--r--vendor/github.com/vishvananda/netns/netns_unspecified.go43
1742 files changed, 303647 insertions, 0 deletions
diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 000000000..e3d9a64d1
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md
new file mode 100644
index 000000000..261c041e7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/README.md
@@ -0,0 +1,12 @@
+# go-ansiterm
+
+This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform-dependent.
+
+For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations: one for tests (test_event_handler.go), which validates that the expected events are produced and called, and one for Windows (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
+
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
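
For a concrete picture of the flow described above, here is a minimal sketch of driving the parser from application code. The stub handler is illustrative and not part of this package; it implements just enough of the AnsiEventHandler interface (all methods, most as no-ops) to show the CUU callback firing when the parser consumes ESC [ A.

    package main

    import (
    	"fmt"

    	"github.com/Azure/go-ansiterm"
    )

    // stubHandler is a hypothetical AnsiEventHandler that reports the one
    // event this example cares about and ignores the rest.
    type stubHandler struct{}

    func (stubHandler) Print(b byte) error     { return nil }
    func (stubHandler) Execute(b byte) error   { return nil }
    func (stubHandler) CUU(n int) error        { fmt.Println("cursor up by", n); return nil }
    func (stubHandler) CUD(n int) error        { return nil }
    func (stubHandler) CUF(n int) error        { return nil }
    func (stubHandler) CUB(n int) error        { return nil }
    func (stubHandler) CNL(n int) error        { return nil }
    func (stubHandler) CPL(n int) error        { return nil }
    func (stubHandler) CHA(n int) error        { return nil }
    func (stubHandler) VPA(n int) error        { return nil }
    func (stubHandler) CUP(r, c int) error     { return nil }
    func (stubHandler) HVP(r, c int) error     { return nil }
    func (stubHandler) DECTCEM(v bool) error   { return nil }
    func (stubHandler) DECOM(v bool) error     { return nil }
    func (stubHandler) DECCOLM(v bool) error   { return nil }
    func (stubHandler) ED(n int) error         { return nil }
    func (stubHandler) EL(n int) error         { return nil }
    func (stubHandler) IL(n int) error         { return nil }
    func (stubHandler) DL(n int) error         { return nil }
    func (stubHandler) ICH(n int) error        { return nil }
    func (stubHandler) DCH(n int) error        { return nil }
    func (stubHandler) SGR(v []int) error      { return nil }
    func (stubHandler) SU(n int) error         { return nil }
    func (stubHandler) SD(n int) error         { return nil }
    func (stubHandler) DA(v []string) error    { return nil }
    func (stubHandler) DECSTBM(t, b int) error { return nil }
    func (stubHandler) IND() error             { return nil }
    func (stubHandler) RI() error              { return nil }
    func (stubHandler) Flush() error           { return nil }

    func main() {
    	parser := ansiterm.CreateParser("Ground", stubHandler{})
    	// ESC [ A is the CSI sequence for Cursor Up; Parse walks the state
    	// machine and ends up calling CUU(1) on the handler.
    	if _, err := parser.Parse([]byte("\x1b[A")); err != nil {
    		fmt.Println("parse error:", err)
    	}
    }

A real consumer would do platform-specific work in each callback, which is exactly what the winterm handler vendored below does.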
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go
new file mode 100644
index 000000000..96504a33b
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/constants.go
@@ -0,0 +1,188 @@
+package ansiterm
+
+const LogEnv = "DEBUG_TERMINAL"
+
+// ANSI constants
+// References:
+// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
+// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
+// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+// -- http://en.wikipedia.org/wiki/ANSI_escape_code
+// -- http://vt100.net/emu/dec_ansi_parser
+// -- http://vt100.net/emu/vt500_parser.svg
+// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+// -- http://www.inwap.com/pdp10/ansicode.txt
+const (
+ // ECMA-48 Set Graphics Rendition
+ // Note:
+	// -- Constants prefixed with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
+ // -- Fonts could possibly be supported via SetCurrentConsoleFontEx
+ // -- Windows does not expose the per-window cursor (i.e., caret) blink times
+ ANSI_SGR_RESET = 0
+ ANSI_SGR_BOLD = 1
+ ANSI_SGR_DIM = 2
+ _ANSI_SGR_ITALIC = 3
+ ANSI_SGR_UNDERLINE = 4
+ _ANSI_SGR_BLINKSLOW = 5
+ _ANSI_SGR_BLINKFAST = 6
+ ANSI_SGR_REVERSE = 7
+ _ANSI_SGR_INVISIBLE = 8
+ _ANSI_SGR_LINETHROUGH = 9
+ _ANSI_SGR_FONT_00 = 10
+ _ANSI_SGR_FONT_01 = 11
+ _ANSI_SGR_FONT_02 = 12
+ _ANSI_SGR_FONT_03 = 13
+ _ANSI_SGR_FONT_04 = 14
+ _ANSI_SGR_FONT_05 = 15
+ _ANSI_SGR_FONT_06 = 16
+ _ANSI_SGR_FONT_07 = 17
+ _ANSI_SGR_FONT_08 = 18
+ _ANSI_SGR_FONT_09 = 19
+ _ANSI_SGR_FONT_10 = 20
+ _ANSI_SGR_DOUBLEUNDERLINE = 21
+ ANSI_SGR_BOLD_DIM_OFF = 22
+ _ANSI_SGR_ITALIC_OFF = 23
+ ANSI_SGR_UNDERLINE_OFF = 24
+ _ANSI_SGR_BLINK_OFF = 25
+ _ANSI_SGR_RESERVED_00 = 26
+ ANSI_SGR_REVERSE_OFF = 27
+ _ANSI_SGR_INVISIBLE_OFF = 28
+ _ANSI_SGR_LINETHROUGH_OFF = 29
+ ANSI_SGR_FOREGROUND_BLACK = 30
+ ANSI_SGR_FOREGROUND_RED = 31
+ ANSI_SGR_FOREGROUND_GREEN = 32
+ ANSI_SGR_FOREGROUND_YELLOW = 33
+ ANSI_SGR_FOREGROUND_BLUE = 34
+ ANSI_SGR_FOREGROUND_MAGENTA = 35
+ ANSI_SGR_FOREGROUND_CYAN = 36
+ ANSI_SGR_FOREGROUND_WHITE = 37
+ _ANSI_SGR_RESERVED_01 = 38
+ ANSI_SGR_FOREGROUND_DEFAULT = 39
+ ANSI_SGR_BACKGROUND_BLACK = 40
+ ANSI_SGR_BACKGROUND_RED = 41
+ ANSI_SGR_BACKGROUND_GREEN = 42
+ ANSI_SGR_BACKGROUND_YELLOW = 43
+ ANSI_SGR_BACKGROUND_BLUE = 44
+ ANSI_SGR_BACKGROUND_MAGENTA = 45
+ ANSI_SGR_BACKGROUND_CYAN = 46
+ ANSI_SGR_BACKGROUND_WHITE = 47
+ _ANSI_SGR_RESERVED_02 = 48
+ ANSI_SGR_BACKGROUND_DEFAULT = 49
+ // 50 - 65: Unsupported
+
+ ANSI_MAX_CMD_LENGTH = 4096
+
+ MAX_INPUT_EVENTS = 128
+ DEFAULT_WIDTH = 80
+ DEFAULT_HEIGHT = 24
+
+ ANSI_BEL = 0x07
+ ANSI_BACKSPACE = 0x08
+ ANSI_TAB = 0x09
+ ANSI_LINE_FEED = 0x0A
+ ANSI_VERTICAL_TAB = 0x0B
+ ANSI_FORM_FEED = 0x0C
+ ANSI_CARRIAGE_RETURN = 0x0D
+ ANSI_ESCAPE_PRIMARY = 0x1B
+ ANSI_ESCAPE_SECONDARY = 0x5B
+ ANSI_OSC_STRING_ENTRY = 0x5D
+ ANSI_COMMAND_FIRST = 0x40
+ ANSI_COMMAND_LAST = 0x7E
+ DCS_ENTRY = 0x90
+ CSI_ENTRY = 0x9B
+ OSC_STRING = 0x9D
+ ANSI_PARAMETER_SEP = ";"
+ ANSI_CMD_G0 = '('
+ ANSI_CMD_G1 = ')'
+ ANSI_CMD_G2 = '*'
+ ANSI_CMD_G3 = '+'
+ ANSI_CMD_DECPNM = '>'
+ ANSI_CMD_DECPAM = '='
+ ANSI_CMD_OSC = ']'
+ ANSI_CMD_STR_TERM = '\\'
+
+ KEY_CONTROL_PARAM_2 = ";2"
+ KEY_CONTROL_PARAM_3 = ";3"
+ KEY_CONTROL_PARAM_4 = ";4"
+ KEY_CONTROL_PARAM_5 = ";5"
+ KEY_CONTROL_PARAM_6 = ";6"
+ KEY_CONTROL_PARAM_7 = ";7"
+ KEY_CONTROL_PARAM_8 = ";8"
+ KEY_ESC_CSI = "\x1B["
+ KEY_ESC_N = "\x1BN"
+ KEY_ESC_O = "\x1BO"
+
+ FILL_CHARACTER = ' '
+)
+
+func getByteRange(start byte, end byte) []byte {
+ bytes := make([]byte, 0, 32)
+ for i := start; i <= end; i++ {
+ bytes = append(bytes, byte(i))
+ }
+
+ return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE 20+A0 hex Always and everywhere a blank space
+// Intermediate 20-2F hex !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters 30-3F hex 0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex 0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase 60-7E hex `abcdefghijklmnopqrstuvwxyz{|}~
+var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics 40-7E hex (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+ escapeToGroundBytes := getByteRange(0x30, 0x4F)
+ escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+ escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+ return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+ executeBytes := getByteRange(0x00, 0x17)
+ executeBytes = append(executeBytes, 0x19)
+ executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+ return executeBytes
+}
+
+func getToGroundBytes() []byte {
+ groundBytes := []byte{0x18}
+ groundBytes = append(groundBytes, 0x1A)
+ groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+ groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+ groundBytes = append(groundBytes, 0x99)
+ groundBytes = append(groundBytes, 0x9A)
+ groundBytes = append(groundBytes, 0x9C)
+ return groundBytes
+}
+
+// Delete 7F hex Always and everywhere ignored
+// C1 Control 80-9F hex 32 additional control characters
+// G1 Displayable A1-FE hex 94 additional displayable characters
+// Special A0+FF hex Same as SPACE and DELETE
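
The tables above are what every state consults to classify an input byte. As an illustrative check, here is a hypothetical test that would have to live inside package ansiterm, since the tables are unexported; it is a sketch, not part of the vendored code.

    package ansiterm

    import "testing"

    func TestByteClasses(t *testing.T) {
    	// 0x0A (line feed) is executed immediately in most states.
    	if !sliceContains(executors, ANSI_LINE_FEED) {
    		t.Fatal("LF should be in the execute class")
    	}
    	// 0x41 ('A') is printable in the ground state and, as an
    	// alphabetic, terminates a CSI sequence.
    	if !sliceContains(printables, 'A') || !sliceContains(alphabetics, 'A') {
    		t.Fatal("'A' should be printable and alphabetic")
    	}
    }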
diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go
new file mode 100644
index 000000000..8d66e777c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/context.go
@@ -0,0 +1,7 @@
+package ansiterm
+
+type ansiContext struct {
+ currentChar byte
+ paramBuffer []byte
+ interBuffer []byte
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
new file mode 100644
index 000000000..1bd6057da
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
@@ -0,0 +1,49 @@
+package ansiterm
+
+type csiEntryState struct {
+ baseState
+}
+
+func (csiState csiEntryState) Handle(b byte) (s state, e error) {
+ logger.Infof("CsiEntry::Handle %#x", b)
+
+ nextState, err := csiState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(alphabetics, b):
+ return csiState.parser.ground, nil
+ case sliceContains(csiCollectables, b):
+ return csiState.parser.csiParam, nil
+ case sliceContains(executors, b):
+ return csiState, csiState.parser.execute()
+ }
+
+ return csiState, nil
+}
+
+func (csiState csiEntryState) Transition(s state) error {
+ logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
+ csiState.baseState.Transition(s)
+
+ switch s {
+ case csiState.parser.ground:
+ return csiState.parser.csiDispatch()
+ case csiState.parser.csiParam:
+ switch {
+ case sliceContains(csiParams, csiState.parser.context.currentChar):
+ csiState.parser.collectParam()
+ case sliceContains(intermeds, csiState.parser.context.currentChar):
+ csiState.parser.collectInter()
+ }
+ }
+
+ return nil
+}
+
+func (csiState csiEntryState) Enter() error {
+ csiState.parser.clear()
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
new file mode 100644
index 000000000..4be35c5fd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
@@ -0,0 +1,38 @@
+package ansiterm
+
+type csiParamState struct {
+ baseState
+}
+
+func (csiState csiParamState) Handle(b byte) (s state, e error) {
+ logger.Infof("CsiParam::Handle %#x", b)
+
+ nextState, err := csiState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(alphabetics, b):
+ return csiState.parser.ground, nil
+ case sliceContains(csiCollectables, b):
+ csiState.parser.collectParam()
+ return csiState, nil
+ case sliceContains(executors, b):
+ return csiState, csiState.parser.execute()
+ }
+
+ return csiState, nil
+}
+
+func (csiState csiParamState) Transition(s state) error {
+ logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
+ csiState.baseState.Transition(s)
+
+ switch s {
+ case csiState.parser.ground:
+ return csiState.parser.csiDispatch()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
new file mode 100644
index 000000000..2189eb6b6
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
@@ -0,0 +1,36 @@
+package ansiterm
+
+type escapeIntermediateState struct {
+ baseState
+}
+
+func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
+ logger.Infof("escapeIntermediateState::Handle %#x", b)
+ nextState, err := escState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(intermeds, b):
+ return escState, escState.parser.collectInter()
+ case sliceContains(executors, b):
+ return escState, escState.parser.execute()
+ case sliceContains(escapeIntermediateToGroundBytes, b):
+ return escState.parser.ground, nil
+ }
+
+ return escState, nil
+}
+
+func (escState escapeIntermediateState) Transition(s state) error {
+ logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
+ escState.baseState.Transition(s)
+
+ switch s {
+ case escState.parser.ground:
+ return escState.parser.escDispatch()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go
new file mode 100644
index 000000000..7b1b9ad3f
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go
@@ -0,0 +1,47 @@
+package ansiterm
+
+type escapeState struct {
+ baseState
+}
+
+func (escState escapeState) Handle(b byte) (s state, e error) {
+ logger.Infof("escapeState::Handle %#x", b)
+ nextState, err := escState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case b == ANSI_ESCAPE_SECONDARY:
+ return escState.parser.csiEntry, nil
+ case b == ANSI_OSC_STRING_ENTRY:
+ return escState.parser.oscString, nil
+ case sliceContains(executors, b):
+ return escState, escState.parser.execute()
+ case sliceContains(escapeToGroundBytes, b):
+ return escState.parser.ground, nil
+ case sliceContains(intermeds, b):
+ return escState.parser.escapeIntermediate, nil
+ }
+
+ return escState, nil
+}
+
+func (escState escapeState) Transition(s state) error {
+ logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name())
+ escState.baseState.Transition(s)
+
+ switch s {
+ case escState.parser.ground:
+ return escState.parser.escDispatch()
+ case escState.parser.escapeIntermediate:
+ return escState.parser.collectInter()
+ }
+
+ return nil
+}
+
+func (escState escapeState) Enter() error {
+ escState.parser.clear()
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go
new file mode 100644
index 000000000..98087b38c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go
@@ -0,0 +1,90 @@
+package ansiterm
+
+type AnsiEventHandler interface {
+ // Print
+ Print(b byte) error
+
+ // Execute C0 commands
+ Execute(b byte) error
+
+ // CUrsor Up
+ CUU(int) error
+
+ // CUrsor Down
+ CUD(int) error
+
+ // CUrsor Forward
+ CUF(int) error
+
+ // CUrsor Backward
+ CUB(int) error
+
+ // Cursor to Next Line
+ CNL(int) error
+
+ // Cursor to Previous Line
+ CPL(int) error
+
+ // Cursor Horizontal position Absolute
+ CHA(int) error
+
+ // Vertical line Position Absolute
+ VPA(int) error
+
+ // CUrsor Position
+ CUP(int, int) error
+
+ // Horizontal and Vertical Position (depends on PUM)
+ HVP(int, int) error
+
+ // Text Cursor Enable Mode
+ DECTCEM(bool) error
+
+ // Origin Mode
+ DECOM(bool) error
+
+ // 132 Column Mode
+ DECCOLM(bool) error
+
+ // Erase in Display
+ ED(int) error
+
+ // Erase in Line
+ EL(int) error
+
+ // Insert Line
+ IL(int) error
+
+ // Delete Line
+ DL(int) error
+
+ // Insert Character
+ ICH(int) error
+
+ // Delete Character
+ DCH(int) error
+
+ // Set Graphics Rendition
+ SGR([]int) error
+
+ // Pan Down
+ SU(int) error
+
+ // Pan Up
+ SD(int) error
+
+ // Device Attributes
+ DA([]string) error
+
+ // Set Top and Bottom Margins
+ DECSTBM(int, int) error
+
+ // Index
+ IND() error
+
+ // Reverse Index
+ RI() error
+
+ // Flush updates from previous commands
+ Flush() error
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go
new file mode 100644
index 000000000..52451e946
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go
@@ -0,0 +1,24 @@
+package ansiterm
+
+type groundState struct {
+ baseState
+}
+
+func (gs groundState) Handle(b byte) (s state, e error) {
+ gs.parser.context.currentChar = b
+
+ nextState, err := gs.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(printables, b):
+ return gs, gs.parser.print()
+
+ case sliceContains(executors, b):
+ return gs, gs.parser.execute()
+ }
+
+ return gs, nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
new file mode 100644
index 000000000..24062d420
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
@@ -0,0 +1,31 @@
+package ansiterm
+
+type oscStringState struct {
+ baseState
+}
+
+func (oscState oscStringState) Handle(b byte) (s state, e error) {
+ logger.Infof("OscString::Handle %#x", b)
+ nextState, err := oscState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case isOscStringTerminator(b):
+ return oscState.parser.ground, nil
+ }
+
+ return oscState, nil
+}
+
+// See http://man7.org/linux/man-pages/man4/console_codes.4.html for the OSC
+// string terminators recognized on Linux (BEL, or the backslash that ends the
+// two-byte string terminator ESC \).
+func isOscStringTerminator(b byte) bool {
+	return b == ANSI_BEL || b == ANSI_CMD_STR_TERM
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go
new file mode 100644
index 000000000..3286a9cb5
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser.go
@@ -0,0 +1,136 @@
+package ansiterm
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+
+ "github.com/sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+
+type AnsiParser struct {
+ currState state
+ eventHandler AnsiEventHandler
+ context *ansiContext
+ csiEntry state
+ csiParam state
+ dcsEntry state
+ escape state
+ escapeIntermediate state
+ error state
+ ground state
+ oscString state
+ stateMap []state
+}
+
+func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser {
+ logFile := ioutil.Discard
+
+ if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+ logFile, _ = os.Create("ansiParser.log")
+ }
+
+ logger = &logrus.Logger{
+ Out: logFile,
+ Formatter: new(logrus.TextFormatter),
+ Level: logrus.InfoLevel,
+ }
+
+ parser := &AnsiParser{
+ eventHandler: evtHandler,
+ context: &ansiContext{},
+ }
+
+ parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}}
+ parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}}
+ parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}}
+ parser.escape = escapeState{baseState{name: "Escape", parser: parser}}
+ parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}}
+ parser.error = errorState{baseState{name: "Error", parser: parser}}
+ parser.ground = groundState{baseState{name: "Ground", parser: parser}}
+ parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}}
+
+ parser.stateMap = []state{
+ parser.csiEntry,
+ parser.csiParam,
+ parser.dcsEntry,
+ parser.escape,
+ parser.escapeIntermediate,
+ parser.error,
+ parser.ground,
+ parser.oscString,
+ }
+
+ parser.currState = getState(initialState, parser.stateMap)
+
+ logger.Infof("CreateParser: parser %p", parser)
+ return parser
+}
+
+func getState(name string, states []state) state {
+ for _, el := range states {
+ if el.Name() == name {
+ return el
+ }
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+ for i, b := range bytes {
+ if err := ap.handle(b); err != nil {
+ return i, err
+ }
+ }
+
+ return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+ ap.context.currentChar = b
+ newState, err := ap.currState.Handle(b)
+ if err != nil {
+ return err
+ }
+
+ if newState == nil {
+ logger.Warning("newState is nil")
+		return errors.New("new state of 'nil' is invalid")
+ }
+
+ if newState != ap.currState {
+ if err := ap.changeState(newState); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+ logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+ // Exit old state
+ if err := ap.currState.Exit(); err != nil {
+ logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+ return err
+ }
+
+ // Perform transition action
+ if err := ap.currState.Transition(newState); err != nil {
+ logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
+ return err
+ }
+
+ // Enter new state
+ if err := newState.Enter(); err != nil {
+ logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err)
+ return err
+ }
+
+ ap.currState = newState
+ return nil
+}
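
A note on the logging wired up in CreateParser above: the parser logs state transitions only when the DEBUG_TERMINAL environment variable (the LogEnv constant from constants.go) is set to 1 before the parser is constructed, in which case output goes to a file named ansiParser.log in the current working directory; otherwise the log output is discarded.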
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 000000000..8b69a67a5
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,103 @@
+package ansiterm
+
+import (
+ "strconv"
+)
+
+func parseParams(bytes []byte) ([]string, error) {
+ paramBuff := make([]byte, 0, 0)
+ params := []string{}
+
+ for _, v := range bytes {
+ if v == ';' {
+ if len(paramBuff) > 0 {
+ // Completed parameter, append it to the list
+ s := string(paramBuff)
+ params = append(params, s)
+ paramBuff = make([]byte, 0, 0)
+ }
+ } else {
+ paramBuff = append(paramBuff, v)
+ }
+ }
+
+ // Last parameter may not be terminated with ';'
+ if len(paramBuff) > 0 {
+ s := string(paramBuff)
+ params = append(params, s)
+ }
+
+ logger.Infof("Parsed params: %v with length: %d", params, len(params))
+ return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+ return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+ i := getInts(params, 1, dflt)[0]
+ logger.Infof("getInt: %v", i)
+ return i
+}
+
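+// getInts parses the collected parameter strings, substituting dflt for the
+// VT100 default value 0 and padding the result to at least minCount entries.
+// For example, for ESC [ 0 ; 5 H the parser collects ["0" "5"], and
+// getInts(params, 2, 1) returns [1 5].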
+func getInts(params []string, minCount int, dflt int) []int {
+ ints := []int{}
+
+ for _, v := range params {
+ i, _ := strconv.Atoi(v)
+ // Zero is mapped to the default value in VT100.
+ if i == 0 {
+ i = dflt
+ }
+ ints = append(ints, i)
+ }
+
+ if len(ints) < minCount {
+ remaining := minCount - len(ints)
+ for i := 0; i < remaining; i++ {
+ ints = append(ints, dflt)
+ }
+ }
+
+ logger.Infof("getInts: %v", ints)
+
+ return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+ switch param {
+ case "?3":
+ return ap.eventHandler.DECCOLM(set)
+ case "?6":
+ return ap.eventHandler.DECOM(set)
+ case "?25":
+ return ap.eventHandler.DECTCEM(set)
+ }
+ return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+ if len(params) == 1 {
+ return ap.modeDispatch(params[0], true)
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+ if len(params) == 1 {
+ return ap.modeDispatch(params[0], false)
+ }
+
+ return nil
+}
+
+func getEraseParam(params []string) int {
+ param := getInt(params, 0)
+ if param < 0 || 3 < param {
+ param = 0
+ }
+
+ return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 000000000..58750a2d2
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,122 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+ currChar := ap.context.currentChar
+ logger.Infof("collectParam %#x", currChar)
+ ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+ return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+ currChar := ap.context.currentChar
+ logger.Infof("collectInter %#x", currChar)
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+ return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+ cmd, _ := parseCmd(*ap.context)
+ intermeds := ap.context.interBuffer
+ logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar)
+ logger.Infof("escDispatch: %v(%v)", cmd, intermeds)
+
+ switch cmd {
+ case "D": // IND
+ return ap.eventHandler.IND()
+ case "E": // NEL, equivalent to CRLF
+ err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+ if err == nil {
+ err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+ }
+ return err
+ case "M": // RI
+ return ap.eventHandler.RI()
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+ cmd, _ := parseCmd(*ap.context)
+ params, _ := parseParams(ap.context.paramBuffer)
+
+ logger.Infof("csiDispatch: %v(%v)", cmd, params)
+
+ switch cmd {
+ case "@":
+ return ap.eventHandler.ICH(getInt(params, 1))
+ case "A":
+ return ap.eventHandler.CUU(getInt(params, 1))
+ case "B":
+ return ap.eventHandler.CUD(getInt(params, 1))
+ case "C":
+ return ap.eventHandler.CUF(getInt(params, 1))
+ case "D":
+ return ap.eventHandler.CUB(getInt(params, 1))
+ case "E":
+ return ap.eventHandler.CNL(getInt(params, 1))
+ case "F":
+ return ap.eventHandler.CPL(getInt(params, 1))
+ case "G":
+ return ap.eventHandler.CHA(getInt(params, 1))
+ case "H":
+ ints := getInts(params, 2, 1)
+ x, y := ints[0], ints[1]
+ return ap.eventHandler.CUP(x, y)
+ case "J":
+ param := getEraseParam(params)
+ return ap.eventHandler.ED(param)
+ case "K":
+ param := getEraseParam(params)
+ return ap.eventHandler.EL(param)
+ case "L":
+ return ap.eventHandler.IL(getInt(params, 1))
+ case "M":
+ return ap.eventHandler.DL(getInt(params, 1))
+ case "P":
+ return ap.eventHandler.DCH(getInt(params, 1))
+ case "S":
+ return ap.eventHandler.SU(getInt(params, 1))
+ case "T":
+ return ap.eventHandler.SD(getInt(params, 1))
+ case "c":
+ return ap.eventHandler.DA(params)
+ case "d":
+ return ap.eventHandler.VPA(getInt(params, 1))
+ case "f":
+ ints := getInts(params, 2, 1)
+ x, y := ints[0], ints[1]
+ return ap.eventHandler.HVP(x, y)
+ case "h":
+ return ap.hDispatch(params)
+ case "l":
+ return ap.lDispatch(params)
+ case "m":
+ return ap.eventHandler.SGR(getInts(params, 1, 0))
+ case "r":
+ ints := getInts(params, 2, 1)
+ top, bottom := ints[0], ints[1]
+ return ap.eventHandler.DECSTBM(top, bottom)
+ default:
+		logger.Errorf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
+ return nil
+ }
+
+}
+
+func (ap *AnsiParser) print() error {
+ return ap.eventHandler.Print(ap.context.currentChar)
+}
+
+func (ap *AnsiParser) clear() error {
+ ap.context = &ansiContext{}
+ return nil
+}
+
+func (ap *AnsiParser) execute() error {
+ return ap.eventHandler.Execute(ap.context.currentChar)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go
new file mode 100644
index 000000000..f2ea1fcd1
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/states.go
@@ -0,0 +1,71 @@
+package ansiterm
+
+type stateID int
+
+type state interface {
+ Enter() error
+ Exit() error
+ Handle(byte) (state, error)
+ Name() string
+ Transition(state) error
+}
+
+type baseState struct {
+ name string
+ parser *AnsiParser
+}
+
+func (base baseState) Enter() error {
+ return nil
+}
+
+func (base baseState) Exit() error {
+ return nil
+}
+
+func (base baseState) Handle(b byte) (s state, e error) {
+
+ switch {
+ case b == CSI_ENTRY:
+ return base.parser.csiEntry, nil
+ case b == DCS_ENTRY:
+ return base.parser.dcsEntry, nil
+ case b == ANSI_ESCAPE_PRIMARY:
+ return base.parser.escape, nil
+ case b == OSC_STRING:
+ return base.parser.oscString, nil
+ case sliceContains(toGroundBytes, b):
+ return base.parser.ground, nil
+ }
+
+ return nil, nil
+}
+
+func (base baseState) Name() string {
+ return base.name
+}
+
+func (base baseState) Transition(s state) error {
+ if s == base.parser.ground {
+ execBytes := []byte{0x18}
+ execBytes = append(execBytes, 0x1A)
+ execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
+ execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
+ execBytes = append(execBytes, 0x99)
+ execBytes = append(execBytes, 0x9A)
+
+ if sliceContains(execBytes, base.parser.context.currentChar) {
+ return base.parser.execute()
+ }
+ }
+
+ return nil
+}
+
+type dcsEntryState struct {
+ baseState
+}
+
+type errorState struct {
+ baseState
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go
new file mode 100644
index 000000000..392114493
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/utilities.go
@@ -0,0 +1,21 @@
+package ansiterm
+
+import (
+ "strconv"
+)
+
+func sliceContains(bytes []byte, b byte) bool {
+ for _, v := range bytes {
+ if v == b {
+ return true
+ }
+ }
+
+ return false
+}
+
+func convertBytesToInteger(bytes []byte) int {
+ s := string(bytes)
+ i, _ := strconv.Atoi(s)
+ return i
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
new file mode 100644
index 000000000..daf2f0696
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
@@ -0,0 +1,182 @@
+// +build windows
+
+package winterm
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/Azure/go-ansiterm"
+)
+
+// Windows keyboard constants
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
+const (
+ VK_PRIOR = 0x21 // PAGE UP key
+ VK_NEXT = 0x22 // PAGE DOWN key
+ VK_END = 0x23 // END key
+ VK_HOME = 0x24 // HOME key
+ VK_LEFT = 0x25 // LEFT ARROW key
+ VK_UP = 0x26 // UP ARROW key
+ VK_RIGHT = 0x27 // RIGHT ARROW key
+ VK_DOWN = 0x28 // DOWN ARROW key
+ VK_SELECT = 0x29 // SELECT key
+ VK_PRINT = 0x2A // PRINT key
+ VK_EXECUTE = 0x2B // EXECUTE key
+ VK_SNAPSHOT = 0x2C // PRINT SCREEN key
+ VK_INSERT = 0x2D // INS key
+ VK_DELETE = 0x2E // DEL key
+ VK_HELP = 0x2F // HELP key
+ VK_F1 = 0x70 // F1 key
+ VK_F2 = 0x71 // F2 key
+ VK_F3 = 0x72 // F3 key
+ VK_F4 = 0x73 // F4 key
+ VK_F5 = 0x74 // F5 key
+ VK_F6 = 0x75 // F6 key
+ VK_F7 = 0x76 // F7 key
+ VK_F8 = 0x77 // F8 key
+ VK_F9 = 0x78 // F9 key
+ VK_F10 = 0x79 // F10 key
+ VK_F11 = 0x7A // F11 key
+ VK_F12 = 0x7B // F12 key
+
+ RIGHT_ALT_PRESSED = 0x0001
+ LEFT_ALT_PRESSED = 0x0002
+ RIGHT_CTRL_PRESSED = 0x0004
+ LEFT_CTRL_PRESSED = 0x0008
+ SHIFT_PRESSED = 0x0010
+ NUMLOCK_ON = 0x0020
+ SCROLLLOCK_ON = 0x0040
+ CAPSLOCK_ON = 0x0080
+ ENHANCED_KEY = 0x0100
+)
+
+type ansiCommand struct {
+ CommandBytes []byte
+ Command string
+ Parameters []string
+ IsSpecial bool
+}
+
+func newAnsiCommand(command []byte) *ansiCommand {
+
+ if isCharacterSelectionCmdChar(command[1]) {
+ // Is Character Set Selection commands
+ return &ansiCommand{
+ CommandBytes: command,
+ Command: string(command),
+ IsSpecial: true,
+ }
+ }
+
+ // last char is command character
+ lastCharIndex := len(command) - 1
+
+ ac := &ansiCommand{
+ CommandBytes: command,
+ Command: string(command[lastCharIndex]),
+ IsSpecial: false,
+ }
+
+ // more than a single escape
+ if lastCharIndex != 0 {
+ start := 1
+ // skip if double char escape sequence
+ if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
+ start++
+ }
+ // convert this to GetNextParam method
+ ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
+ }
+
+ return ac
+}
+
+func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
+ if index < 0 || index >= len(ac.Parameters) {
+ return defaultValue
+ }
+
+ param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
+ if err != nil {
+ return defaultValue
+ }
+
+ return int16(param)
+}
+
+func (ac *ansiCommand) String() string {
+ return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
+ bytesToHex(ac.CommandBytes),
+ ac.Command,
+ strings.Join(ac.Parameters, "\",\""))
+}
+
+// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
+// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
+func isAnsiCommandChar(b byte) bool {
+ switch {
+ case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
+ return true
+ case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
+ // non-CSI escape sequence terminator
+ return true
+ case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
+ // String escape sequence terminator
+ return true
+ }
+ return false
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+ return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+ return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
+}
+
+// bytesToHex converts a slice of bytes to a human-readable string.
+func bytesToHex(b []byte) string {
+ hex := make([]string, len(b))
+ for i, ch := range b {
+ hex[i] = fmt.Sprintf("%X", ch)
+ }
+ return strings.Join(hex, "")
+}
+
+// ensureInRange adjusts the passed value, if necessary, to ensure it is within
+// the passed min / max range.
+func ensureInRange(n int16, min int16, max int16) int16 {
+ if n < min {
+ return min
+ } else if n > max {
+ return max
+ } else {
+ return n
+ }
+}
+
+func GetStdFile(nFile int) (*os.File, uintptr) {
+ var file *os.File
+ switch nFile {
+ case syscall.STD_INPUT_HANDLE:
+ file = os.Stdin
+ case syscall.STD_OUTPUT_HANDLE:
+ file = os.Stdout
+ case syscall.STD_ERROR_HANDLE:
+ file = os.Stderr
+ default:
+ panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
+ }
+
+ fd, err := syscall.GetStdHandle(nFile)
+ if err != nil {
+ panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err))
+ }
+
+ return file, uintptr(fd)
+}
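
To make the command parsing above concrete, here is a hypothetical in-package test (Windows-only, matching the file's build tag) of how newAnsiCommand splits a buffered escape sequence; it is a sketch, not part of the vendored code.

    // +build windows

    package winterm

    import "testing"

    func TestNewAnsiCommand(t *testing.T) {
    	ac := newAnsiCommand([]byte("\x1b[2;3H"))
    	if ac.Command != "H" || ac.IsSpecial {
    		t.Fatalf("unexpected command: %+v", ac)
    	}
    	if len(ac.Parameters) != 2 || ac.Parameters[0] != "2" || ac.Parameters[1] != "3" {
    		t.Fatalf("unexpected parameters: %v", ac.Parameters)
    	}
    	if got := ac.paramAsSHORT(0, 1); got != 2 {
    		t.Fatalf("paramAsSHORT(0, 1) = %d, want 2", got)
    	}
    }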
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
new file mode 100644
index 000000000..462d92f8e
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -0,0 +1,322 @@
+// +build windows
+
+package winterm
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+// variables) that the pointers reference *before* the API call completes.
+//
+// As a result, in those cases, the code must hint that the variables remain active by invoking the
+// dummy function "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+// require unsafe pointers.
+//
+// If you add or modify methods, ENSURE protection of local variables through the "use" function to inform
+// the garbage collector that the variables remain in use if:
+//
+// -- The value is not a pointer (e.g., int32, struct)
+// -- The value is not referenced by the method after passing the pointer to Windows
+//
+// See http://golang.org/doc/go1.3.
+//===========================================================================================================
+
+var (
+ kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+
+ getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
+ setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
+ setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
+ setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
+ getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+ setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
+ scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
+ setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
+ setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
+ writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
+ readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
+ waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
+)
+
+// Windows Console constants
+const (
+ // Console modes
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+ ENABLE_PROCESSED_INPUT = 0x0001
+ ENABLE_LINE_INPUT = 0x0002
+ ENABLE_ECHO_INPUT = 0x0004
+ ENABLE_WINDOW_INPUT = 0x0008
+ ENABLE_MOUSE_INPUT = 0x0010
+ ENABLE_INSERT_MODE = 0x0020
+ ENABLE_QUICK_EDIT_MODE = 0x0040
+ ENABLE_EXTENDED_FLAGS = 0x0080
+
+ ENABLE_PROCESSED_OUTPUT = 0x0001
+ ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
+
+ // Character attributes
+ // Note:
+ // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
+ // Clearing all foreground or background colors results in black; setting all creates white.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
+ FOREGROUND_BLUE uint16 = 0x0001
+ FOREGROUND_GREEN uint16 = 0x0002
+ FOREGROUND_RED uint16 = 0x0004
+ FOREGROUND_INTENSITY uint16 = 0x0008
+ FOREGROUND_MASK uint16 = 0x000F
+
+ BACKGROUND_BLUE uint16 = 0x0010
+ BACKGROUND_GREEN uint16 = 0x0020
+ BACKGROUND_RED uint16 = 0x0040
+ BACKGROUND_INTENSITY uint16 = 0x0080
+ BACKGROUND_MASK uint16 = 0x00F0
+
+ COMMON_LVB_MASK uint16 = 0xFF00
+ COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
+ COMMON_LVB_UNDERSCORE uint16 = 0x8000
+
+ // Input event types
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+ KEY_EVENT = 0x0001
+ MOUSE_EVENT = 0x0002
+ WINDOW_BUFFER_SIZE_EVENT = 0x0004
+ MENU_EVENT = 0x0008
+ FOCUS_EVENT = 0x0010
+
+ // WaitForSingleObject return codes
+ WAIT_ABANDONED = 0x00000080
+ WAIT_FAILED = 0xFFFFFFFF
+	WAIT_SIGNALED = 0x00000000
+ WAIT_TIMEOUT = 0x00000102
+
+ // WaitForSingleObject wait duration
+ WAIT_INFINITE = 0xFFFFFFFF
+ WAIT_ONE_SECOND = 1000
+ WAIT_HALF_SECOND = 500
+ WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+ CHAR_INFO struct {
+ UnicodeChar uint16
+ Attributes uint16
+ }
+
+ CONSOLE_CURSOR_INFO struct {
+ Size uint32
+ Visible int32
+ }
+
+ CONSOLE_SCREEN_BUFFER_INFO struct {
+ Size COORD
+ CursorPosition COORD
+ Attributes uint16
+ Window SMALL_RECT
+ MaximumWindowSize COORD
+ }
+
+ COORD struct {
+ X int16
+ Y int16
+ }
+
+ SMALL_RECT struct {
+ Left int16
+ Top int16
+ Right int16
+ Bottom int16
+ }
+
+	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case; it is also the largest
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+ INPUT_RECORD struct {
+ EventType uint16
+ KeyEvent KEY_EVENT_RECORD
+ }
+
+ KEY_EVENT_RECORD struct {
+ KeyDown int32
+ RepeatCount uint16
+ VirtualKeyCode uint16
+ VirtualScanCode uint16
+ UnicodeChar uint16
+ ControlKeyState uint32
+ }
+
+ WINDOW_BUFFER_SIZE struct {
+ Size COORD
+ }
+)
+
+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+ if f {
+ return int32(1)
+ } else {
+ return int32(0)
+ }
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+ r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+ r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition sets the location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+ r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+ use(coord)
+ return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for the given file descriptor
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+ err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+ return mode, err
+}
+
+// SetConsoleMode sets the console mode for the given file descriptor
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+func SetConsoleMode(handle uintptr, mode uint32) error {
+ r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
+ use(mode)
+ return checkError(r1, r2, err)
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+ info := CONSOLE_SCREEN_BUFFER_INFO{}
+ err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
+ if err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
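+// ScrollConsoleScreenBuffer moves a block of character data from one part of
+// the console screen buffer to another, filling the cells left behind with
+// the passed character and attributes.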
+func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
+ r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
+ use(scrollRect)
+ use(clipRect)
+ use(destOrigin)
+ use(char)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleScreenBufferSize sets the size of the console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
+func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
+ r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
+ use(coord)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleTextAttribute sets the attributes of characters written to the
+// console screen buffer by the WriteFile or WriteConsole function.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
+func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
+ r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
+ use(attribute)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
+// Note that the size and location must be within and no larger than the backing console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
+func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
+ r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
+ use(isAbsolute)
+ use(rect)
+ return checkError(r1, r2, err)
+}
+
+// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
+func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
+ r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
+ use(buffer)
+ use(bufferSize)
+ use(bufferCoord)
+ return checkError(r1, r2, err)
+}
+
+// ReadConsoleInput reads (and removes) data from the console input buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
+ r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
+ use(buffer)
+ return checkError(r1, r2, err)
+}
+
+// WaitForSingleObject waits for the passed handle to be signaled.
+// It returns true if the handle was signaled; false otherwise.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
+func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
+ r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
+ switch r1 {
+ case WAIT_ABANDONED, WAIT_TIMEOUT:
+ return false, nil
+ case WAIT_SIGNALED:
+ return true, nil
+ }
+ use(msWait)
+ return false, err
+}
+
+// String helpers
+func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
+ return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
+}
+
+func (coord COORD) String() string {
+ return fmt.Sprintf("%v,%v", coord.X, coord.Y)
+}
+
+func (rect SMALL_RECT) String() string {
+ return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
+}
+
+// checkError evaluates the results of a Windows API call and returns the error if it failed.
+func checkError(r1, r2 uintptr, err error) error {
+ // Windows APIs return non-zero to indicate success
+ if r1 != 0 {
+ return nil
+ }
+
+ // Return the error if provided, otherwise default to EINVAL
+ if err != nil {
+ return err
+ }
+ return syscall.EINVAL
+}
+
+// coordToPointer converts a COORD into a uintptr (by fooling the type system).
+func coordToPointer(c COORD) uintptr {
+ // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
+ return uintptr(*((*uint32)(unsafe.Pointer(&c))))
+}
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+func use(p interface{}) {}
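
As a quick illustration of the exported wrappers above, the following Windows-only sketch (illustrative, not part of the vendored code) fetches the screen-buffer geometry for standard output using GetStdFile, GetConsoleScreenBufferInfo, and the String helpers defined in this file.

    // +build windows

    package main

    import (
    	"fmt"
    	"syscall"

    	"github.com/Azure/go-ansiterm/winterm"
    )

    func main() {
    	// GetStdFile panics on an unknown identifier, so use a syscall constant.
    	_, fd := winterm.GetStdFile(syscall.STD_OUTPUT_HANDLE)

    	info, err := winterm.GetConsoleScreenBufferInfo(fd)
    	if err != nil {
    		fmt.Println("not a console:", err)
    		return
    	}
    	fmt.Println(info) // e.g. Size(80,25) Cursor(0,0) Window((0,0),(79,24)) Max(80,25)
    }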
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
new file mode 100644
index 000000000..cbec8f728
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
@@ -0,0 +1,100 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+const (
+ FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+ BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+)
+
+// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
+// request represented by the passed ANSI mode.
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
+ switch ansiMode {
+
+ // Mode styles
+ case ansiterm.ANSI_SGR_BOLD:
+ windowsMode = windowsMode | FOREGROUND_INTENSITY
+
+ case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
+ windowsMode &^= FOREGROUND_INTENSITY
+
+ case ansiterm.ANSI_SGR_UNDERLINE:
+ windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
+
+ case ansiterm.ANSI_SGR_REVERSE:
+ inverted = true
+
+ case ansiterm.ANSI_SGR_REVERSE_OFF:
+ inverted = false
+
+ case ansiterm.ANSI_SGR_UNDERLINE_OFF:
+ windowsMode &^= COMMON_LVB_UNDERSCORE
+
+ // Foreground colors
+ case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
+ windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
+
+ case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
+
+ case ansiterm.ANSI_SGR_FOREGROUND_RED:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
+
+ case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+ // Background colors
+ case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
+ // Black with no intensity
+ windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
+
+ case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
+
+ case ansiterm.ANSI_SGR_BACKGROUND_RED:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
+
+ case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+ }
+
+ return windowsMode, inverted
+}
+
+// invertAttributes inverts the foreground and background colors of a Windows attributes value
+func invertAttributes(windowsMode uint16) uint16 {
+ return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
+}
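
To see the translation compose, here is a hypothetical check that would have to live inside package winterm (collectAnsiIntoWindowsAttributes is unexported) and build only on Windows; it is a sketch, not part of the vendored code. Starting from a gray-on-black base, SGR 31 followed by SGR 44 should yield red text on a blue background.

    // +build windows

    package winterm

    import (
    	"testing"

    	"github.com/Azure/go-ansiterm"
    )

    func TestSGRTranslation(t *testing.T) {
    	base := FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE // gray on black
    	mode, inverted := collectAnsiIntoWindowsAttributes(base, false, base, ansiterm.ANSI_SGR_FOREGROUND_RED)
    	mode, inverted = collectAnsiIntoWindowsAttributes(mode, inverted, base, ansiterm.ANSI_SGR_BACKGROUND_BLUE)
    	if inverted {
    		t.Fatal("SGR 31/44 must not set reverse video")
    	}
    	if mode&FOREGROUND_COLOR_MASK != FOREGROUND_RED || mode&BACKGROUND_COLOR_MASK != BACKGROUND_BLUE {
    		t.Fatalf("unexpected attributes: %#x", mode)
    	}
    }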
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
new file mode 100644
index 000000000..f015723ad
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
@@ -0,0 +1,101 @@
+// +build windows
+
+package winterm
+
+const (
+ horizontal = iota
+ vertical
+)
+
+func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
+ if h.originMode {
+ sr := h.effectiveSr(info.Window)
+ return SMALL_RECT{
+ Top: sr.top,
+ Bottom: sr.bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+ } else {
+ return SMALL_RECT{
+ Top: info.Window.Top,
+ Bottom: info.Window.Bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+ }
+}
+
+// setCursorPosition sets the cursor to the specified position, bounded to the screen size
+func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
+ position.X = ensureInRange(position.X, window.Left, window.Right)
+ position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
+ err := SetConsoleCursorPosition(h.fd, position)
+ if err != nil {
+ return err
+ }
+ logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y)
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
+ return h.moveCursor(vertical, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
+ return h.moveCursor(horizontal, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ switch moveMode {
+ case horizontal:
+ position.X += int16(param)
+ case vertical:
+ position.Y += int16(param)
+ }
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ position.X = 0
+ position.Y += int16(param)
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ position.X = int16(param) - 1
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
new file mode 100644
index 000000000..244b5fa25
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
@@ -0,0 +1,84 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
+ // Ignore an invalid (negative area) request
+ if toCoord.Y < fromCoord.Y {
+ return nil
+ }
+
+ var err error
+
+ var coordStart = COORD{}
+ var coordEnd = COORD{}
+
+ xCurrent, yCurrent := fromCoord.X, fromCoord.Y
+ xEnd, yEnd := toCoord.X, toCoord.Y
+
+ // Clear any partial initial line
+ if xCurrent > 0 {
+ coordStart.X, coordStart.Y = xCurrent, yCurrent
+ coordEnd.X, coordEnd.Y = xEnd, yCurrent
+
+ err = h.clearRect(attributes, coordStart, coordEnd)
+ if err != nil {
+ return err
+ }
+
+ xCurrent = 0
+ yCurrent++
+ }
+
+ // Clear intervening rectangular section
+ if yCurrent < yEnd {
+ coordStart.X, coordStart.Y = xCurrent, yCurrent
+ coordEnd.X, coordEnd.Y = xEnd, yEnd-1
+
+ err = h.clearRect(attributes, coordStart, coordEnd)
+ if err != nil {
+ return err
+ }
+
+ xCurrent = 0
+ yCurrent = yEnd
+ }
+
+ // Clear remaining partial ending line
+ coordStart.X, coordStart.Y = xCurrent, yCurrent
+ coordEnd.X, coordEnd.Y = xEnd, yEnd
+
+ err = h.clearRect(attributes, coordStart, coordEnd)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
+ region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+ width := toCoord.X - fromCoord.X + 1
+ height := toCoord.Y - fromCoord.Y + 1
+
+ // Guard against empty or inverted rectangles before converting to an
+ // unsigned size, where a negative dimension would wrap around.
+ if width <= 0 || height <= 0 {
+ return nil
+ }
+
+ size := uint32(width) * uint32(height)
+
+ buffer := make([]CHAR_INFO, size)
+
+ char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+ for i := 0; i < int(size); i++ {
+ buffer[i] = char
+ }
+
+ err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
new file mode 100644
index 000000000..706d27057
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
+// effectiveSr gets the current effective scroll region in buffer coordinates
+func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
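+ // Convert the window-relative scroll region to buffer coordinates,
+ // clamping it to the visible window; an empty or inverted region falls
+ // back to the full window below.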
+ top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
+ bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
+ if top >= bottom {
+ top = window.Top
+ bottom = window.Bottom
+ }
+ return scrollRegion{top: top, bottom: bottom}
+}
+
+func (h *windowsAnsiEventHandler) scrollUp(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ sr := h.effectiveSr(info.Window)
+ return h.scroll(param, sr, info)
+}
+
+func (h *windowsAnsiEventHandler) scrollDown(param int) error {
+ return h.scrollUp(-param)
+}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ start := info.CursorPosition.Y
+ sr := h.effectiveSr(info.Window)
+ // Lines cannot be inserted or deleted outside the scrolling region.
+ if start >= sr.top && start <= sr.bottom {
+ sr.top = start
+ return h.scroll(param, sr, info)
+ } else {
+ return nil
+ }
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+ return h.deleteLines(-param)
+}
+
+// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+ logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+ logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+
+ // Copy from and clip to the scroll region (full buffer width)
+ scrollRect := SMALL_RECT{
+ Top: sr.top,
+ Bottom: sr.bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+
+ // Origin to which area should be copied
+ destOrigin := COORD{
+ X: 0,
+ Y: sr.top - int16(param),
+ }
+
+ char := CHAR_INFO{
+ UnicodeChar: ' ',
+ Attributes: h.attributes,
+ }
+
+ if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+ return h.scrollLine(param, info.CursorPosition, info)
+}
+
+func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
+ return h.deleteCharacters(-param)
+}
+
+// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+ // Copy from and clip to the scroll region (full buffer width)
+ scrollRect := SMALL_RECT{
+ Top: position.Y,
+ Bottom: position.Y,
+ Left: position.X,
+ Right: info.Size.X - 1,
+ }
+
+ // Origin to which area should be copied
+ destOrigin := COORD{
+ X: position.X - int16(columns),
+ Y: position.Y,
+ }
+
+ char := CHAR_INFO{
+ UnicodeChar: ' ',
+ Attributes: h.attributes,
+ }
+
+ if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
new file mode 100644
index 000000000..afa7635d7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
+// addInRange increments a value by the passed quantity while ensuring the
+// result always remains within the supplied min / max range.
+func addInRange(n int16, increment int16, min int16, max int16) int16 {
+ return ensureInRange(n+increment, min, max)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
new file mode 100644
index 000000000..48998bb05
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
@@ -0,0 +1,726 @@
+// +build windows
+
+package winterm
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "strconv"
+
+ "github.com/Azure/go-ansiterm"
+ "github.com/sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+
+type windowsAnsiEventHandler struct {
+ fd uintptr
+ file *os.File
+ infoReset *CONSOLE_SCREEN_BUFFER_INFO
+ sr scrollRegion
+ buffer bytes.Buffer
+ attributes uint16
+ inverted bool
+ wrapNext bool
+ drewMarginByte bool
+ originMode bool
+ marginByte byte
+ curInfo *CONSOLE_SCREEN_BUFFER_INFO
+ curPos COORD
+}
+
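+// CreateWinEventHandler returns an AnsiEventHandler that translates parsed
+// ANSI events into Windows console API calls against the given console
+// handle. It returns nil if the console screen buffer information cannot be
+// queried.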
+func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler {
+ logFile := ioutil.Discard
+
+ if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+ logFile, _ = os.Create("winEventHandler.log")
+ }
+
+ logger = &logrus.Logger{
+ Out: logFile,
+ Formatter: new(logrus.TextFormatter),
+ Level: logrus.DebugLevel,
+ }
+
+ infoReset, err := GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil
+ }
+
+ return &windowsAnsiEventHandler{
+ fd: fd,
+ file: file,
+ infoReset: infoReset,
+ attributes: infoReset.Attributes,
+ }
+}
+
+type scrollRegion struct {
+ top int16
+ bottom int16
+}
+
+// simulateLF simulates an LF or CR+LF by scrolling if necessary to handle the
+// current cursor position and scroll region settings, in which case it
+// returns true. If no special handling is necessary, it does nothing and
+// returns false.
+//
+// In the false case, the caller should ensure that a carriage return
+// and line feed are inserted or that the text is otherwise wrapped.
+func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
+ if h.wrapNext {
+ if err := h.Flush(); err != nil {
+ return false, err
+ }
+ h.clearWrap()
+ }
+ pos, info, err := h.getCurrentInfo()
+ if err != nil {
+ return false, err
+ }
+ sr := h.effectiveSr(info.Window)
+ if pos.Y == sr.bottom {
+ // Scrolling is necessary. Let Windows automatically scroll if the scrolling region
+ // is the full window.
+ if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
+ if includeCR {
+ pos.X = 0
+ h.updatePos(pos)
+ }
+ return false, nil
+ }
+
+ // A custom scroll region is active. Scroll the window manually to simulate
+ // the LF.
+ if err := h.Flush(); err != nil {
+ return false, err
+ }
+ logger.Info("Simulating LF inside scroll region")
+ if err := h.scrollUp(1); err != nil {
+ return false, err
+ }
+ if includeCR {
+ pos.X = 0
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return false, err
+ }
+ }
+ return true, nil
+
+ } else if pos.Y < info.Window.Bottom {
+ // Let Windows handle the LF.
+ pos.Y++
+ if includeCR {
+ pos.X = 0
+ }
+ h.updatePos(pos)
+ return false, nil
+ } else {
+ // The cursor is at the bottom of the screen but outside the scroll
+ // region. Skip the LF.
+ logger.Info("Simulating LF outside scroll region")
+ if includeCR {
+ if err := h.Flush(); err != nil {
+ return false, err
+ }
+ pos.X = 0
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return false, err
+ }
+ }
+ return true, nil
+ }
+}
+
+// executeLF executes a LF without a CR.
+func (h *windowsAnsiEventHandler) executeLF() error {
+ handled, err := h.simulateLF(false)
+ if err != nil {
+ return err
+ }
+ if !handled {
+ // Windows LF will reset the cursor column position. Write the LF
+ // and restore the cursor position.
+ pos, _, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+ if pos.X != 0 {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Info("Resetting cursor position for LF without CR")
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
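+// Print buffers a printable byte, deferring line wrap in the style of DEC
+// terminals: a byte destined for the final column is held as a "margin byte"
+// until the following byte reveals whether the line actually wraps.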
+func (h *windowsAnsiEventHandler) Print(b byte) error {
+ if h.wrapNext {
+ h.buffer.WriteByte(h.marginByte)
+ h.clearWrap()
+ if _, err := h.simulateLF(true); err != nil {
+ return err
+ }
+ }
+ pos, info, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ if pos.X == info.Size.X-1 {
+ h.wrapNext = true
+ h.marginByte = b
+ } else {
+ pos.X++
+ h.updatePos(pos)
+ h.buffer.WriteByte(b)
+ }
+ return nil
+}
+
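+// Execute handles C0 control characters, emulating the ones whose Windows
+// console behavior differs from a VT100 terminal.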
+func (h *windowsAnsiEventHandler) Execute(b byte) error {
+ switch b {
+ case ansiterm.ANSI_TAB:
+ logger.Info("Execute(TAB)")
+ // Move to the next tab stop, but preserve auto-wrap if already set.
+ if !h.wrapNext {
+ pos, info, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
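+ // Advance to the next tab stop (every 8 columns), clamping to the
+ // final column.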
+ pos.X = (pos.X + 8) - pos.X%8
+ if pos.X >= info.Size.X {
+ pos.X = info.Size.X - 1
+ }
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case ansiterm.ANSI_BEL:
+ h.buffer.WriteByte(ansiterm.ANSI_BEL)
+ return nil
+
+ case ansiterm.ANSI_BACKSPACE:
+ if h.wrapNext {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.clearWrap()
+ }
+ pos, _, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ if pos.X > 0 {
+ pos.X--
+ h.updatePos(pos)
+ h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
+ }
+ return nil
+
+ case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
+ // Treat as true LF.
+ return h.executeLF()
+
+ case ansiterm.ANSI_LINE_FEED:
+ // Simulate a CR and LF for now since there is no way in go-ansiterm
+ // to tell if the LF should include CR (and more things break when it's
+ // missing than when it's incorrectly added).
+ handled, err := h.simulateLF(true)
+ if handled || err != nil {
+ return err
+ }
+ return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+
+ case ansiterm.ANSI_CARRIAGE_RETURN:
+ if h.wrapNext {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.clearWrap()
+ }
+ pos, _, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ if pos.X != 0 {
+ pos.X = 0
+ h.updatePos(pos)
+ h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
+ }
+ return nil
+
+ default:
+ return nil
+ }
+}
+
+func (h *windowsAnsiEventHandler) CUU(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorVertical(-param)
+}
+
+func (h *windowsAnsiEventHandler) CUD(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorVertical(param)
+}
+
+func (h *windowsAnsiEventHandler) CUF(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorHorizontal(param)
+}
+
+func (h *windowsAnsiEventHandler) CUB(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorHorizontal(-param)
+}
+
+func (h *windowsAnsiEventHandler) CNL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorLine(param)
+}
+
+func (h *windowsAnsiEventHandler) CPL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorLine(-param)
+}
+
+func (h *windowsAnsiEventHandler) CHA(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorColumn(param)
+}
+
+func (h *windowsAnsiEventHandler) VPA(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("VPA: [[%d]]", param)
+ h.clearWrap()
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+ window := h.getCursorWindow(info)
+ position := info.CursorPosition
+ position.Y = window.Top + int16(param) - 1
+ return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("CUP: [[%d %d]]", row, col)
+ h.clearWrap()
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ window := h.getCursorWindow(info)
+ position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
+ return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("HVP: [[%d %d]]", row, col)
+ h.clearWrap()
+ return h.CUP(row, col)
+}
+
+func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
+ h.clearWrap()
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)})
+ h.clearWrap()
+ h.originMode = enable
+ return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
+ h.clearWrap()
+ if err := h.ED(2); err != nil {
+ return err
+ }
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+ targetWidth := int16(80)
+ if use132 {
+ targetWidth = 132
+ }
+ if info.Size.X < targetWidth {
+ if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+ logger.Info("set buffer failed:", err)
+ return err
+ }
+ }
+ window := info.Window
+ window.Left = 0
+ window.Right = targetWidth - 1
+ if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+ logger.Info("set window failed:", err)
+ return err
+ }
+ if info.Size.X > targetWidth {
+ if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+ logger.Info("set buffer failed:", err)
+ return err
+ }
+ }
+ return SetConsoleCursorPosition(h.fd, COORD{0, 0})
+}
+
+func (h *windowsAnsiEventHandler) ED(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("ED: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+
+ // [J -- Erases from the cursor to the end of the screen, including the cursor position.
+ // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
+ // [2J -- Erases the complete display. The cursor does not move.
+ // Notes:
+ // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ var start COORD
+ var end COORD
+
+ switch param {
+ case 0:
+ start = info.CursorPosition
+ end = COORD{info.Size.X - 1, info.Size.Y - 1}
+
+ case 1:
+ start = COORD{0, 0}
+ end = info.CursorPosition
+
+ case 2:
+ start = COORD{0, 0}
+ end = COORD{info.Size.X - 1, info.Size.Y - 1}
+ }
+
+ err = h.clearRange(h.attributes, start, end)
+ if err != nil {
+ return err
+ }
+
+ // If the whole buffer was cleared, move the window to the top while preserving
+ // the window-relative cursor position.
+ if param == 2 {
+ pos := info.CursorPosition
+ window := info.Window
+ pos.Y -= window.Top
+ window.Bottom -= window.Top
+ window.Top = 0
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return err
+ }
+ if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) EL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("EL: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+
+ // [K -- Erases from the cursor to the end of the line, including the cursor position.
+ // [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
+ // [2K -- Erases the complete line.
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ var start COORD
+ var end COORD
+
+ switch param {
+ case 0:
+ start = info.CursorPosition
+ end = COORD{info.Size.X, info.CursorPosition.Y}
+
+ case 1:
+ start = COORD{0, info.CursorPosition.Y}
+ end = info.CursorPosition
+
+ case 2:
+ start = COORD{0, info.CursorPosition.Y}
+ end = COORD{info.Size.X, info.CursorPosition.Y}
+ }
+
+ err = h.clearRange(h.attributes, start, end)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) IL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("IL: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.insertLines(param)
+}
+
+func (h *windowsAnsiEventHandler) DL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("DL: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.deleteLines(param)
+}
+
+func (h *windowsAnsiEventHandler) ICH(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("ICH: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.insertCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) DCH(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("DCH: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.deleteCharacters(param)
+}
+
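+// SGR folds Select Graphic Rendition parameters into the cached Windows
+// console attributes and applies the result with a single
+// SetConsoleTextAttribute call.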
+func (h *windowsAnsiEventHandler) SGR(params []int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ strings := []string{}
+ for _, v := range params {
+ strings = append(strings, strconv.Itoa(v))
+ }
+
+ logger.Infof("SGR: [%v]", strings)
+
+ if len(params) == 0 {
+ h.attributes = h.infoReset.Attributes
+ h.inverted = false
+ } else {
+ for _, attr := range params {
+
+ if attr == ansiterm.ANSI_SGR_RESET {
+ h.attributes = h.infoReset.Attributes
+ h.inverted = false
+ continue
+ }
+
+ h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
+ }
+ }
+
+ attributes := h.attributes
+ if h.inverted {
+ attributes = invertAttributes(attributes)
+ }
+ err := SetConsoleTextAttribute(h.fd, attributes)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) SU(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("SU: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.scrollUp(param)
+}
+
+func (h *windowsAnsiEventHandler) SD(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("SD: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.scrollDown(param)
+}
+
+func (h *windowsAnsiEventHandler) DA(params []string) error {
+ logger.Infof("DA: [%v]", params)
+ // DA cannot be implemented because it must send data on the VT100 input stream,
+ // which is not available to go-ansiterm.
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Infof("DECSTBM: [%d, %d]", top, bottom)
+
+ // Windows is 0 indexed, while ANSI escape sequences are 1 indexed
+ h.sr.top = int16(top - 1)
+ h.sr.bottom = int16(bottom - 1)
+
+ // This command also moves the cursor to the origin.
+ h.clearWrap()
+ return h.CUP(1, 1)
+}
+
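+// RI performs a reverse index: the cursor moves up one line, scrolling the
+// scroll region down when the cursor is already at its top.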
+func (h *windowsAnsiEventHandler) RI() error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ logger.Info("RI: []")
+ h.clearWrap()
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ sr := h.effectiveSr(info.Window)
+ if info.CursorPosition.Y == sr.top {
+ return h.scrollDown(1)
+ }
+
+ return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+ logger.Info("IND: []")
+ return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+ h.curInfo = nil
+ if h.buffer.Len() > 0 {
+ logger.Infof("Flush: [%s]", h.buffer.Bytes())
+ if _, err := h.buffer.WriteTo(h.file); err != nil {
+ return err
+ }
+ }
+
+ if h.wrapNext && !h.drewMarginByte {
+ logger.Infof("Flush: drawing margin byte '%c'", h.marginByte)
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+ size := COORD{1, 1}
+ position := COORD{0, 0}
+ region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+ if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+ return err
+ }
+ h.drewMarginByte = true
+ }
+ return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been
+// queried since the last call to Flush(). It must be called before accessing
+// h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+ if h.curInfo == nil {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return COORD{}, nil, err
+ }
+ h.curInfo = info
+ h.curPos = info.CursorPosition
+ }
+ return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+ if h.curInfo == nil {
+ panic("failed to call getCurrentInfo before calling updatePos")
+ }
+ h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+ h.wrapNext = false
+ h.drewMarginByte = false
+}
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 000000000..5a8e33254
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 000000000..5a5df6370
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,220 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data
+representations. (There is an example of this below.)
+
+Spec: https://github.com/mojombo/toml
+
+Compatible with TOML version
+[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
+
+Documentation: http://godoc.org/github.com/BurntSushi/toml
+
+Installation:
+
+```bash
+go get github.com/BurntSushi/toml
+```
+
+Try the toml validator:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
+
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles `XML`
+and `JSON`. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+ // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+ ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+ Name string
+ Duration duration
+}
+type songs struct {
+ Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+ log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+ time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+ var err error
+ d.Duration, err = time.ParseDuration(string(text))
+ return err
+}
+```
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
+ Servers map[string]server
+ Clients clients
+}
+
+type ownerInfo struct {
+ Name string
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
+}
+
+type database struct {
+ Server string
+ Ports []int
+ ConnMax int `toml:"connection_max"`
+ Enabled bool
+}
+
+type server struct {
+ IP string
+ DC string
+}
+
+type clients struct {
+ Data [][]interface{}
+ Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
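+For example (an illustrative pairing, not taken from this package's test
+suite), the TOML key `name` still decodes into the struct field `Name`:
+
+```toml
+name = "toml"
+```
+
+```go
+type pkg struct {
+    Name string // matched case insensitively
+}
+```
+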
+A working example of the above can be found in `_examples/example.{go,toml}`.
+
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 000000000..c26b00c01
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,505 @@
+package toml
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+ "time"
+)
+
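+// e is a short alias for fmt.Errorf used to build errors throughout this
+// package.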
+var e = fmt.Errorf
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+ _, err := Decode(string(p), v)
+ return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+ undecoded interface{}
+ context Key
+}
+
+// Deprecated: Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case insensitive match to struct names will be tried if an exact match
+// can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
+func Decode(data string, v interface{}) (MetaData, error) {
+ p, err := parse(data)
+ if err != nil {
+ return MetaData{}, err
+ }
+ md := MetaData{
+ p.mapping, p.types, p.ordered,
+ make(map[string]bool, len(p.ordered)), nil,
+ }
+ return md, md.unify(p.mapping, rvalue(v))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadFile(fpath)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadAll(r)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+ // Special case. Look for a `Primitive` value.
+ if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // Save the undecoded data and the key context into the primitive
+ // value.
+ context := make(Key, len(md.context))
+ copy(context, md.context)
+ rv.Set(reflect.ValueOf(Primitive{
+ undecoded: data,
+ context: context,
+ }))
+ return nil
+ }
+
+ // Special case. Unmarshaler Interface support.
+ if rv.CanAddr() {
+ if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
+ }
+ }
+
+ // Special case. Handle time.Time values specifically.
+ // TODO: Remove this code when we decide to drop support for Go 1.1.
+ // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+ // interfaces.
+ if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+ return md.unifyDatetime(data, rv)
+ }
+
+ // Special case. Look for a value satisfying the TextUnmarshaler interface.
+ if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ return md.unifyText(data, v)
+ }
+ // BUG(burntsushi)
+ // The behavior here is incorrect whenever a Go type satisfies the
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML
+ // hash or array. In particular, the unmarshaler should only be applied
+ // to primitive TOML values. But at this point, it will be applied to
+ // all kinds of values and produce an incorrect error whenever those values
+ // are hashes or arrays (including arrays of tables).
+
+ k := rv.Kind()
+
+ // laziness
+ if k >= reflect.Int && k <= reflect.Uint64 {
+ return md.unifyInt(data, rv)
+ }
+ switch k {
+ case reflect.Ptr:
+ elem := reflect.New(rv.Type().Elem())
+ err := md.unify(data, reflect.Indirect(elem))
+ if err != nil {
+ return err
+ }
+ rv.Set(elem)
+ return nil
+ case reflect.Struct:
+ return md.unifyStruct(data, rv)
+ case reflect.Map:
+ return md.unifyMap(data, rv)
+ case reflect.Array:
+ return md.unifyArray(data, rv)
+ case reflect.Slice:
+ return md.unifySlice(data, rv)
+ case reflect.String:
+ return md.unifyString(data, rv)
+ case reflect.Bool:
+ return md.unifyBool(data, rv)
+ case reflect.Interface:
+ // we only support empty interfaces.
+ if rv.NumMethod() > 0 {
+ return e("Unsupported type '%s'.", rv.Kind())
+ }
+ return md.unifyAnything(data, rv)
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ return md.unifyFloat64(data, rv)
+ }
+ return e("Unsupported type '%s'.", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if mapping == nil {
+ return nil
+ }
+ return mismatch(rv, "map", mapping)
+ }
+
+ for key, datum := range tmap {
+ var f *field
+ fields := cachedTypeFields(rv.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv := rv
+ for _, i := range f.index {
+ subv = indirect(subv.Field(i))
+ }
+ if isUnifiable(subv) {
+ md.decoded[md.context.add(key).String()] = true
+ md.context = append(md.context, key)
+ if err := md.unify(datum, subv); err != nil {
+ return e("Type mismatch for '%s.%s': %s",
+ rv.Type().String(), f.name, err)
+ }
+ md.context = md.context[0 : len(md.context)-1]
+ } else if f.name != "" {
+ // Bad user! No soup for you!
+ return e("Field '%s.%s' is unexported, and therefore cannot "+
+ "be loaded with reflection.", rv.Type().String(), f.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if mapping == nil {
+ return nil
+ }
+ return badtype("map", mapping)
+ }
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ for k, v := range tmap {
+ md.decoded[md.context.add(k).String()] = true
+ md.context = append(md.context, k)
+
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+ rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+ if err := md.unify(v, rvval); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+
+ rvkey.SetString(k)
+ rv.SetMapIndex(rvkey, rvval)
+ }
+ return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ sliceLen := datav.Len()
+ if sliceLen != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d",
+ rv.Len(), sliceLen)
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ n := datav.Len()
+ if rv.IsNil() || rv.Cap() < n {
+ rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+ }
+ rv.SetLen(n)
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+ sliceLen := data.Len()
+ for i := 0; i < sliceLen; i++ {
+ v := data.Index(i).Interface()
+ sliceval := indirect(rv.Index(i))
+ if err := md.unify(v, sliceval); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+ if _, ok := data.(time.Time); ok {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+ }
+ return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ if s, ok := data.(string); ok {
+ rv.SetString(s)
+ return nil
+ }
+ return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(float64); ok {
+ switch rv.Kind() {
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ rv.SetFloat(num)
+ default:
+ panic("bug")
+ }
+ return nil
+ }
+ return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(int64); ok {
+ if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int64:
+ // No bounds checking necessary.
+ case reflect.Int8:
+ if num < math.MinInt8 || num > math.MaxInt8 {
+ return e("Value '%d' is out of range for int8.", num)
+ }
+ case reflect.Int16:
+ if num < math.MinInt16 || num > math.MaxInt16 {
+ return e("Value '%d' is out of range for int16.", num)
+ }
+ case reflect.Int32:
+ if num < math.MinInt32 || num > math.MaxInt32 {
+ return e("Value '%d' is out of range for int32.", num)
+ }
+ }
+ rv.SetInt(num)
+ } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+ unum := uint64(num)
+ switch rv.Kind() {
+ case reflect.Uint, reflect.Uint64:
+ // No bounds checking necessary.
+ case reflect.Uint8:
+ if num < 0 || unum > math.MaxUint8 {
+ return e("Value '%d' is out of range for uint8.", num)
+ }
+ case reflect.Uint16:
+ if num < 0 || unum > math.MaxUint16 {
+ return e("Value '%d' is out of range for uint16.", num)
+ }
+ case reflect.Uint32:
+ if num < 0 || unum > math.MaxUint32 {
+ return e("Value '%d' is out of range for uint32.", num)
+ }
+ }
+ rv.SetUint(unum)
+ } else {
+ panic("unreachable")
+ }
+ return nil
+ }
+ return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+ if b, ok := data.(bool); ok {
+ rv.SetBool(b)
+ return nil
+ }
+ return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+ var s string
+ switch sdata := data.(type) {
+ case TextMarshaler:
+ text, err := sdata.MarshalText()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case fmt.Stringer:
+ s = sdata.String()
+ case string:
+ s = sdata
+ case bool:
+ s = fmt.Sprintf("%v", sdata)
+ case int64:
+ s = fmt.Sprintf("%d", sdata)
+ case float64:
+ s = fmt.Sprintf("%f", sdata)
+ default:
+ return badtype("primitive (string-like)", data)
+ }
+ if err := v.UnmarshalText([]byte(s)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+ return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+ if v.Kind() != reflect.Ptr {
+ if v.CanAddr() {
+ pv := v.Addr()
+ if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ return pv
+ }
+ }
+ return v
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+ if rv.CanSet() {
+ return true
+ }
+ if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ return true
+ }
+ return false
+}
+
+func badtype(expected string, data interface{}) error {
+ return e("Expected %s but found '%T'.", expected, data)
+}
+
+func mismatch(user reflect.Value, expected string, data interface{}) error {
+ return e("Type mismatch for %s. Expected %s but found '%T'.",
+ user.Type().String(), expected, data)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 000000000..ef6f545fa
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,122 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ keys []Key
+ decoded map[string]bool
+ context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the given key exists in the TOML data. The key
+// should be specified hierarchically. e.g.,
+//
+// // access the TOML key 'a.b.c'
+// IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var hash map[string]interface{}
+ var ok bool
+ var hashOrVal interface{} = md.mapping
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ fullkey := strings.Join(key, ".")
+ if typ, ok := md.types[fullkey]; ok {
+ return typ.typeString()
+ }
+ return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+ return strings.Join(k, ".")
+}
+
+func (k Key) maybeQuotedAll() string {
+ var ss []string
+ for i := range k {
+ ss = append(ss, k.maybeQuoted(i))
+ }
+ return strings.Join(ss, ".")
+}
+
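+// maybeQuoted returns the key piece at index i, quoting and escaping it when
+// it contains characters that are not valid in a bare TOML key.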
+func (k Key) maybeQuoted(i int) string {
+ quote := false
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ quote = true
+ break
+ }
+ }
+ if quote {
+ return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ } else {
+ return k[i]
+ }
+}
+
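+// add returns a copy of the key with piece appended; the receiver is left
+// unmodified.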
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if !md.decoded[key.String()] {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 000000000..fe2680004
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/mojombo/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+The reason the tests are in a separate project is so that they can be used by
+any implementation of TOML. Namely, it is language agnostic.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 000000000..4e4c97aed
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,549 @@
+package toml
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+ errArrayMixedElementTypes = errors.New(
+ "can't encode array with mixed element types")
+ errArrayNilElement = errors.New(
+ "can't encode array with nil element")
+ errNonString = errors.New(
+ "can't encode a map with non-string key type")
+ errAnonNonStruct = errors.New(
+ "can't encode an anonymous field that is not a struct")
+ errArrayNoTable = errors.New(
+ "TOML array element can't contain a table")
+ errNoKey = errors.New(
+ "top-level values must be a Go map or struct")
+ errAnything = errors.New("") // used in testing
+)
+
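+// quotedReplacer escapes the characters that must be backslash-escaped
+// inside a basic (double-quoted) TOML string.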
+var quotedReplacer = strings.NewReplacer(
+ "\t", "\\t",
+ "\n", "\\n",
+ "\r", "\\r",
+ "\"", "\\\"",
+ "\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values to a TOML document to some
+// io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+ // A single indentation level. By default it is two spaces.
+ Indent string
+
+ // hasWritten is whether we have written any output to w yet.
+ hasWritten bool
+ w *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: bufio.NewWriter(w),
+ Indent: " ",
+ }
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
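+//
+// A minimal usage sketch (the config type is hypothetical, and the bytes and
+// log packages are assumed to be imported):
+//
+//    var buf bytes.Buffer
+//    cfg := struct{ Name string }{Name: "example"}
+//    if err := NewEncoder(&buf).Encode(cfg); err != nil {
+//        log.Fatal(err)
+//    }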
+func (enc *Encoder) Encode(v interface{}) error {
+ rv := eindirect(reflect.ValueOf(v))
+ if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ return err
+ }
+ return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if terr, ok := r.(tomlEncodeError); ok {
+ err = terr.error
+ return
+ }
+ panic(r)
+ }
+ }()
+ enc.encode(key, rv)
+ return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+ // Special case. Time needs to be in ISO8601 format.
+ // Special case. If we can marshal the type to text, then we use that.
+ // Basically, this prevents the encoder from handling these types as
+ // generic structs (or whatever the underlying type of a TextMarshaler is).
+ switch rv.Interface().(type) {
+ case time.Time, TextMarshaler:
+ enc.keyEqElement(key, rv)
+ return
+ }
+
+ k := rv.Kind()
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+ enc.keyEqElement(key, rv)
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+ enc.eArrayOfTables(key, rv)
+ } else {
+ enc.keyEqElement(key, rv)
+ }
+ case reflect.Interface:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Map:
+ if rv.IsNil() {
+ return
+ }
+ enc.eTable(key, rv)
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Struct:
+ enc.eTable(key, rv)
+ default:
+ panic(e("Unsupported type for key '%s': %s", key, k))
+ }
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+ switch v := rv.Interface().(type) {
+ case time.Time:
+ // Special case time.Time as a primitive. Has to come before
+ // TextMarshaler below because time.Time implements
+ // encoding.TextMarshaler, but we need to always use UTC.
+ enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
+ return
+ case TextMarshaler:
+ // Special case. Use text marshaler if it's available for this value.
+ if s, err := v.MarshalText(); err != nil {
+ encPanic(err)
+ } else {
+ enc.writeQuoted(string(s))
+ }
+ return
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ enc.wf(strconv.FormatBool(rv.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64:
+ enc.wf(strconv.FormatInt(rv.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64:
+ enc.wf(strconv.FormatUint(rv.Uint(), 10))
+ case reflect.Float32:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ case reflect.Float64:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ case reflect.Array, reflect.Slice:
+ enc.eArrayOrSliceElement(rv)
+ case reflect.Interface:
+ enc.eElement(rv.Elem())
+ case reflect.String:
+ enc.writeQuoted(rv.String())
+ default:
+ panic(e("Unexpected primitive type: %s", rv.Kind()))
+ }
+}
+
+// By the TOML spec, all floats must have a decimal with at least one
+// number on either side.
+func floatAddDecimal(fstr string) string {
+ if !strings.Contains(fstr, ".") {
+ return fstr + ".0"
+ }
+ return fstr
+}
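+
+// For example (a sketch of the rule above): floatAddDecimal("3") returns
+// "3.0", while floatAddDecimal("3.14") comes back unchanged.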
+
+func (enc *Encoder) writeQuoted(s string) {
+ enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+ length := rv.Len()
+ enc.wf("[")
+ for i := 0; i < length; i++ {
+ elem := rv.Index(i)
+ enc.eElement(elem)
+ if i != length-1 {
+ enc.wf(", ")
+ }
+ }
+ enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ for i := 0; i < rv.Len(); i++ {
+ trv := rv.Index(i)
+ if isNil(trv) {
+ continue
+ }
+ panicIfInvalidKey(key)
+ enc.newline()
+ enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ enc.eMapOrStruct(key, trv)
+ }
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+ panicIfInvalidKey(key)
+ if len(key) == 1 {
+ // Output an extra new line between top-level tables.
+ // (The newline isn't written if nothing else has been written though.)
+ enc.newline()
+ }
+ if len(key) > 0 {
+ enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ }
+ enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+ switch rv := eindirect(rv); rv.Kind() {
+ case reflect.Map:
+ enc.eMap(key, rv)
+ case reflect.Struct:
+ enc.eStruct(key, rv)
+ default:
+ panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+ }
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+ rt := rv.Type()
+ if rt.Key().Kind() != reflect.String {
+ encPanic(errNonString)
+ }
+
+ // Sort keys so that we have deterministic output. And write keys directly
+ // underneath this key first, before writing sub-structs or sub-maps.
+ var mapKeysDirect, mapKeysSub []string
+ for _, mapKey := range rv.MapKeys() {
+ k := mapKey.String()
+ if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ mapKeysSub = append(mapKeysSub, k)
+ } else {
+ mapKeysDirect = append(mapKeysDirect, k)
+ }
+ }
+
+ var writeMapKeys = func(mapKeys []string) {
+ sort.Strings(mapKeys)
+ for _, mapKey := range mapKeys {
+ mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(mrv) {
+ // Don't write anything for nil fields.
+ continue
+ }
+ enc.encode(key.add(mapKey), mrv)
+ }
+ }
+ writeMapKeys(mapKeysDirect)
+ writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+ // Write keys for fields directly under this key first, because if we write
+ // a field that creates a new table, then all keys under it will be in that
+ // table (not the one we're writing here).
+ rt := rv.Type()
+ var fieldsDirect, fieldsSub [][]int
+ var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ // skip unexported fields
+ if f.PkgPath != "" && !f.Anonymous {
+ continue
+ }
+ frv := rv.Field(i)
+ if f.Anonymous {
+ t := f.Type
+ switch t.Kind() {
+ case reflect.Struct:
+ addFields(t, frv, f.Index)
+ continue
+ case reflect.Ptr:
+ if t.Elem().Kind() == reflect.Struct {
+ if !frv.IsNil() {
+ addFields(t.Elem(), frv.Elem(), f.Index)
+ }
+ continue
+ }
+ // Fall through to the normal field encoding logic below
+ // for non-struct anonymous fields.
+ }
+ }
+
+ if typeIsHash(tomlTypeOfGo(frv)) {
+ fieldsSub = append(fieldsSub, append(start, f.Index...))
+ } else {
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ }
+ }
+ }
+ addFields(rt, rv, nil)
+
+ var writeFields = func(fields [][]int) {
+ for _, fieldIndex := range fields {
+ sft := rt.FieldByIndex(fieldIndex)
+ sf := rv.FieldByIndex(fieldIndex)
+ if isNil(sf) {
+ // Don't write anything for nil fields.
+ continue
+ }
+
+ tag := sft.Tag.Get("toml")
+ if tag == "-" {
+ continue
+ }
+ keyName, opts := getOptions(tag)
+ if keyName == "" {
+ keyName = sft.Name
+ }
+ if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
+ continue
+ } else if _, ok := opts["omitzero"]; ok && isZero(sf) {
+ continue
+ }
+
+ enc.encode(key.add(keyName), sf)
+ }
+ }
+ writeFields(fieldsDirect)
+ writeFields(fieldsSub)
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found (e.g., for a nil value).
+// The result is also used to determine whether the types of array elements
+// are mixed, which is forbidden in TOML.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() {
+ return nil
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return tomlBool
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64:
+ return tomlInteger
+ case reflect.Float32, reflect.Float64:
+ return tomlFloat
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlHash, tomlArrayType(rv)) {
+ return tomlArrayHash
+ } else {
+ return tomlArray
+ }
+ case reflect.Ptr, reflect.Interface:
+ return tomlTypeOfGo(rv.Elem())
+ case reflect.String:
+ return tomlString
+ case reflect.Map:
+ return tomlHash
+ case reflect.Struct:
+ switch rv.Interface().(type) {
+ case time.Time:
+ return tomlDatetime
+ case TextMarshaler:
+ return tomlString
+ default:
+ return tomlHash
+ }
+ default:
+ panic("unexpected reflect.Kind: " + rv.Kind().String())
+ }
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+ return nil
+ }
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
+ }
+
+ rvlen := rv.Len()
+ for i := 1; i < rvlen; i++ {
+ elem := rv.Index(i)
+ switch elemType := tomlTypeOfGo(elem); {
+ case elemType == nil:
+ encPanic(errArrayNilElement)
+ case !typeEqual(firstType, elemType):
+ encPanic(errArrayMixedElementTypes)
+ }
+ }
+ // If we have a nested array, then we must make sure that the nested
+ // array contains ONLY primitives.
+ // This checks arbitrarily nested arrays.
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+ nest := tomlArrayType(eindirect(rv.Index(0)))
+ if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+ encPanic(errArrayNoTable)
+ }
+ }
+ return firstType
+}
+
+// getOptions splits a struct field's `toml` tag into the key name and the
+// set of trailing options.
+func getOptions(tag string) (string, map[string]struct{}) {
+	opts := make(map[string]struct{})
+	ss := strings.Split(tag, ",")
+	name := ss[0]
+	// Only the comma-separated parts after the name are options; the name
+	// itself must not end up in the option set.
+	for _, opt := range ss[1:] {
+		opts[opt] = struct{}{}
+	}
+
+	return name, opts
+}
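+
+// For example (hypothetical tag values): getOptions("name,omitempty") returns
+// ("name", {omitempty}); getOptions("-") returns ("-", {}), which callers
+// treat as "skip this field"; and getOptions("") returns ("", {}), leaving
+// callers to fall back to the Go field name.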
+
+func isZero(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() == 0.0
+ }
+ return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return rv.Len() == 0
+ case reflect.Bool:
+ return !rv.Bool()
+ }
+ return false
+}
+
+func (enc *Encoder) newline() {
+ if enc.hasWritten {
+ enc.wf("\n")
+ }
+}
+
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ panicIfInvalidKey(key)
+ enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+ enc.eElement(val)
+ enc.newline()
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+ if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+ encPanic(err)
+ }
+ enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+ return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+ panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ return eindirect(v.Elem())
+ default:
+ return v
+ }
+}
+
+func isNil(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return rv.IsNil()
+ default:
+ return false
+ }
+}
+
+func panicIfInvalidKey(key Key) {
+ for _, k := range key {
+ if len(k) == 0 {
+ encPanic(e("Key '%s' is not a valid table name. Key names "+
+ "cannot be empty.", key.maybeQuotedAll()))
+ }
+ }
+}
+
+func isValidKeyName(s string) bool {
+ return len(s) != 0
+}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
new file mode 100644
index 000000000..d36e1dd60
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types.go
@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+ "encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
new file mode 100644
index 000000000..e8d503d04
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler interface {
+ MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
new file mode 100644
index 000000000..9b20b3a81
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -0,0 +1,871 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+type itemType int
+
+const (
+ itemError itemType = iota
+ itemNIL // used in the parser to indicate no type
+ itemEOF
+ itemText
+ itemString
+ itemRawString
+ itemMultilineString
+ itemRawMultilineString
+ itemBool
+ itemInteger
+ itemFloat
+ itemDatetime
+ itemArray // the start of an array
+ itemArrayEnd
+ itemTableStart
+ itemTableEnd
+ itemArrayTableStart
+ itemArrayTableEnd
+ itemKeyStart
+ itemCommentStart
+)
+
+const (
+ eof = 0
+ tableStart = '['
+ tableEnd = ']'
+ arrayTableStart = '['
+ arrayTableEnd = ']'
+ tableSep = '.'
+ keySep = '='
+ arrayStart = '['
+ arrayEnd = ']'
+ arrayValTerm = ','
+ commentStart = '#'
+ stringStart = '"'
+ stringEnd = '"'
+ rawStringStart = '\''
+ rawStringEnd = '\''
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+ input string
+ start int
+ pos int
+ width int
+ line int
+ state stateFn
+ items chan item
+
+ // A stack of state functions used to maintain context.
+ // The idea is to reuse parts of the state machine in various places.
+ // For example, values can appear at the top level or within arbitrarily
+ // nested arrays. The last state on the stack is used after a value has
+ // been lexed. Similarly for comments.
+ stack []stateFn
+}
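+
+// As a sketch of how the state stack is used: while lexing `x = [1, 2]`, the
+// array state pushes lexArrayValueEnd before handing control to lexValue.
+// When the terminal state for the value `1` calls lx.pop(), lexing resumes
+// at lexArrayValueEnd, which consumes the ',' and loops back for `2`.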
+
+type item struct {
+ typ itemType
+ val string
+ line int
+}
+
+func (lx *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-lx.items:
+ return item
+ default:
+ lx.state = lx.state(lx)
+ }
+ }
+}
+
+func lex(input string) *lexer {
+ lx := &lexer{
+ input: input + "\n",
+ state: lexTop,
+ line: 1,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ }
+ return lx
+}
+
+func (lx *lexer) push(state stateFn) {
+ lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+ if len(lx.stack) == 0 {
+ return lx.errorf("BUG in lexer: no states to pop.")
+ }
+ last := lx.stack[len(lx.stack)-1]
+ lx.stack = lx.stack[0 : len(lx.stack)-1]
+ return last
+}
+
+func (lx *lexer) current() string {
+ return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+ lx.items <- item{typ, lx.current(), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+ lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+ if lx.pos >= len(lx.input) {
+ lx.width = 0
+ return eof
+ }
+
+ if lx.input[lx.pos] == '\n' {
+ lx.line++
+ }
+ r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
+ lx.pos += lx.width
+ return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+ lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called only once per call of next.
+func (lx *lexer) backup() {
+ lx.pos -= lx.width
+ if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+ lx.line--
+ }
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+ if lx.next() == valid {
+ return true
+ }
+ lx.backup()
+ return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+ r := lx.next()
+ lx.backup()
+ return r
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that character values in the message are escaped (via %q) if they
+// are special characters (new lines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+ lx.items <- item{
+ itemError,
+ fmt.Sprintf(format, values...),
+ lx.line,
+ }
+ return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+ r := lx.next()
+ if isWhitespace(r) || isNL(r) {
+ return lexSkip(lx, lexTop)
+ }
+
+ switch r {
+ case commentStart:
+ lx.push(lexTop)
+ return lexCommentStart
+ case tableStart:
+ return lexTableStart
+ case eof:
+ if lx.pos > lx.start {
+ return lx.errorf("Unexpected EOF.")
+ }
+ lx.emit(itemEOF)
+ return nil
+ }
+
+ // At this point, the only valid item can be a key, so we back up
+ // and let the key lexer do the rest.
+ lx.backup()
+ lx.push(lexTopEnd)
+ return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a new line. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == commentStart:
+ // a comment will read to a new line for us.
+ lx.push(lexTop)
+ return lexCommentStart
+ case isWhitespace(r):
+ return lexTopEnd
+ case isNL(r):
+ lx.ignore()
+ return lexTop
+ case r == eof:
+ lx.ignore()
+ return lexTop
+ }
+ return lx.errorf("Expected a top-level item to end with a new line, "+
+ "comment or EOF, but got %q instead.", r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+ if lx.peek() == arrayTableStart {
+ lx.next()
+ lx.emit(itemArrayTableStart)
+ lx.push(lexArrayTableEnd)
+ } else {
+ lx.emit(itemTableStart)
+ lx.push(lexTableEnd)
+ }
+ return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+ lx.emit(itemTableEnd)
+ return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+ if r := lx.next(); r != arrayTableEnd {
+ return lx.errorf("Expected end of table array name delimiter %q, "+
+ "but got %q instead.", arrayTableEnd, r)
+ }
+ lx.emit(itemArrayTableEnd)
+ return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+ switch r := lx.peek(); {
+ case r == tableEnd || r == eof:
+ return lx.errorf("Unexpected end of table name. (Table names cannot " +
+ "be empty.)")
+ case r == tableSep:
+ return lx.errorf("Unexpected table separator. (Table names cannot " +
+ "be empty.)")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexTableNameEnd)
+ return lexValue // reuse string lexing
+ default:
+ return lexBareTableName
+ }
+}
+
+// lexBareTableName lexes the name of a bare table. It assumes that none of
+// the name has been consumed yet.
+func lexBareTableName(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareTableName
+ case r == tableSep || r == tableEnd:
+ lx.backup()
+ lx.emitTrim(itemText)
+ return lexTableNameEnd
+ default:
+ return lx.errorf("Bare keys cannot contain %q.", r)
+ }
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexTableNameEnd
+ case r == tableSep:
+ lx.ignore()
+ return lexTableNameStart
+ case r == tableEnd:
+ return lx.pop()
+ default:
+ return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
+ "instead.", r)
+ }
+}
+
+// lexKeyStart consumes whitespace up to the first character of a key name,
+// then emits itemKeyStart and hands control to the appropriate key lexer.
+func lexKeyStart(lx *lexer) stateFn {
+ r := lx.peek()
+ switch {
+ case r == keySep:
+ return lx.errorf("Unexpected key separator %q.", keySep)
+ case isWhitespace(r) || isNL(r):
+ lx.next()
+ return lexSkip(lx, lexKeyStart)
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ lx.push(lexKeyEnd)
+ return lexValue // reuse string lexing
+ default:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ return lexBareKey
+ }
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareKey
+ case isWhitespace(r):
+ lx.emitTrim(itemText)
+ return lexKeyEnd
+ case r == keySep:
+ lx.backup()
+ lx.emitTrim(itemText)
+ return lexKeyEnd
+ default:
+ return lx.errorf("Bare keys cannot contain %q.", r)
+ }
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case r == keySep:
+ return lexSkip(lx, lexValue)
+ case isWhitespace(r):
+ return lexSkip(lx, lexKeyEnd)
+ default:
+ return lx.errorf("Expected key separator %q, but got %q instead.",
+ keySep, r)
+ }
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+ // We allow whitespace to precede a value, but NOT new lines.
+ // In array syntax, the array states are responsible for ignoring new
+ // lines.
+ r := lx.next()
+ if isWhitespace(r) {
+ return lexSkip(lx, lexValue)
+ }
+
+ switch {
+ case r == arrayStart:
+ lx.ignore()
+ lx.emit(itemArray)
+ return lexArrayValue
+ case r == stringStart:
+ if lx.accept(stringStart) {
+ if lx.accept(stringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the '"'
+ return lexString
+ case r == rawStringStart:
+ if lx.accept(rawStringStart) {
+ if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore '''
+ return lexMultilineRawString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case r == 't':
+ return lexTrue
+ case r == 'f':
+ return lexFalse
+ case r == '-':
+ return lexNumberStart
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
+ case r == '.': // special error case, be kind to users
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ }
+ return lx.errorf("Expected value but found %q instead.", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and new lines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValue)
+ case r == commentStart:
+ lx.push(lexArrayValue)
+ return lexCommentStart
+ case r == arrayValTerm:
+ return lx.errorf("Unexpected array value terminator %q.",
+ arrayValTerm)
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+
+ lx.backup()
+ lx.push(lexArrayValueEnd)
+ return lexValue
+}
+
+// lexArrayValueEnd consumes the cruft between values of an array. Namely,
+// it ignores whitespace and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValueEnd)
+ case r == commentStart:
+ lx.push(lexArrayValueEnd)
+ return lexCommentStart
+ case r == arrayValTerm:
+ lx.ignore()
+ return lexArrayValue // move on to the next value
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+ return lx.errorf("Expected an array value terminator %q or an array "+
+ "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
+}
+
+// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
+// just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemArrayEnd)
+ return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isNL(r):
+ return lx.errorf("Strings cannot contain new lines.")
+ case r == '\\':
+ lx.push(lexString)
+ return lexStringEscape
+ case r == stringEnd:
+ lx.backup()
+ lx.emit(itemString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == '\\':
+ return lexMultilineStringEscape
+ case r == stringEnd:
+ if lx.accept(stringEnd) {
+ if lx.accept(stringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isNL(r):
+ return lx.errorf("Strings cannot contain new lines.")
+ case r == rawStringEnd:
+ lx.backup()
+ lx.emit(itemRawString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexRawString
+}
+
+// lexMultilineRawString consumes a multiline raw string. Nothing can be
+// escaped in such a string. It assumes that the beginning "'''" has already
+// been consumed and ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == rawStringEnd:
+ if lx.accept(rawStringEnd) {
+ if lx.accept(rawStringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemRawMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+ // Handle the special case first:
+ if isNL(lx.next()) {
+ return lexMultilineString
+ } else {
+ lx.backup()
+ lx.push(lexMultilineString)
+ return lexStringEscape(lx)
+ }
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case 'b':
+ fallthrough
+ case 't':
+ fallthrough
+ case 'n':
+ fallthrough
+ case 'f':
+ fallthrough
+ case 'r':
+ fallthrough
+ case '"':
+ fallthrough
+ case '\\':
+ return lx.pop()
+ case 'u':
+ return lexShortUnicodeEscape
+ case 'U':
+ return lexLongUnicodeEscape
+ }
+ return lx.errorf("Invalid escape character %q. Only the following "+
+ "escape characters are allowed: "+
+ "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
+ "\\uXXXX and \\UXXXXXXXX.", r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 4; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf("Expected four hexadecimal digits after '\\u', "+
+ "but got '%s' instead.", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 8; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
+ "but got '%s' instead.", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either a (positive) integer, float or
+// datetime. It assumes that NO negative sign has been consumed.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ } else {
+ return lx.errorf("Expected a digit but got %q.", r)
+ }
+ }
+ return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either a (positive) integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == '-':
+ if lx.pos-lx.start != 5 {
+ return lx.errorf("All ISO8601 dates must be in full Zulu form.")
+ }
+ return lexDateAfterYear
+ case isDigit(r):
+ return lexNumberOrDate
+ case r == '.':
+ return lexFloatStart
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
+// It assumes that "YYYY-" has already been consumed.
+func lexDateAfterYear(lx *lexer) stateFn {
+ formats := []rune{
+		// '0' stands for any digit;
+		// everything else must match by direct equality.
+ '0', '0', '-', '0', '0',
+ 'T',
+ '0', '0', ':', '0', '0', ':', '0', '0',
+ 'Z',
+ }
+ for _, f := range formats {
+ r := lx.next()
+ if f == '0' {
+ if !isDigit(r) {
+ return lx.errorf("Expected digit in ISO8601 datetime, "+
+ "but found %q instead.", r)
+ }
+ } else if f != r {
+ return lx.errorf("Expected %q in ISO8601 datetime, "+
+ "but found %q instead.", f, r)
+ }
+ }
+ lx.emit(itemDatetime)
+ return lx.pop()
+}
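+
+// For example, `1979-05-27T07:32:00Z` satisfies the format table above, while
+// `1979-05-27 07:32:00` is rejected as soon as the space is read where 'T'
+// is required.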
+
+// lexNumberStart consumes either an integer or a float. It assumes that
+// a negative sign has already been read, but that *no* digits have been
+// consumed. lexNumberStart will move to the appropriate integer or float
+// states.
+func lexNumberStart(lx *lexer) stateFn {
+ // we MUST see a digit. Even floats have to start with a digit.
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ } else {
+ return lx.errorf("Expected a digit but got %q.", r)
+ }
+ }
+ return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isDigit(r):
+ return lexNumber
+ case r == '.':
+ return lexFloatStart
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexFloatStart starts the consumption of digits of a float after a '.'.
+// Namely, at least one digit is required.
+func lexFloatStart(lx *lexer) stateFn {
+ r := lx.next()
+ if !isDigit(r) {
+ return lx.errorf("Floats must have a digit after the '.', but got "+
+ "%q instead.", r)
+ }
+ return lexFloat
+}
+
+// lexFloat consumes the digits of a float after a '.'.
+// Assumes that one digit has been consumed after a '.' already.
+func lexFloat(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemFloat)
+ return lx.pop()
+}
+
+// lexConst consumes the remainder s[1:] of the constant s. It assumes that
+// s[0] has already been consumed.
+func lexConst(lx *lexer, s string) stateFn {
+ for i := range s[1:] {
+ if r := lx.next(); r != rune(s[i+1]) {
+ return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
+ s[:i]+string(r))
+ }
+ }
+ return nil
+}
+
+// lexTrue consumes the "rue" in "true". It assumes that 't' has already
+// been consumed.
+func lexTrue(lx *lexer) stateFn {
+ if fn := lexConst(lx, "true"); fn != nil {
+ return fn
+ }
+ lx.emit(itemBool)
+ return lx.pop()
+}
+
+// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
+// been consumed.
+func lexFalse(lx *lexer) stateFn {
+ if fn := lexConst(lx, "false"); fn != nil {
+ return fn
+ }
+ lx.emit(itemBool)
+ return lx.pop()
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first new line character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ r := lx.peek()
+ if isNL(r) || r == eof {
+ lx.emit(itemText)
+ return lx.pop()
+ }
+ lx.next()
+ return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ return func(lx *lexer) stateFn {
+ lx.ignore()
+ return nextState
+ }
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+ return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' ||
+ r == '-'
+}
+
+func (itype itemType) String() string {
+ switch itype {
+ case itemError:
+ return "Error"
+ case itemNIL:
+ return "NIL"
+ case itemEOF:
+ return "EOF"
+ case itemText:
+ return "Text"
+ case itemString:
+ return "String"
+ case itemRawString:
+ return "String"
+ case itemMultilineString:
+ return "String"
+ case itemRawMultilineString:
+ return "String"
+ case itemBool:
+ return "Bool"
+ case itemInteger:
+ return "Integer"
+ case itemFloat:
+ return "Float"
+ case itemDatetime:
+ return "DateTime"
+ case itemTableStart:
+ return "TableStart"
+ case itemTableEnd:
+ return "TableEnd"
+ case itemKeyStart:
+ return "KeyStart"
+ case itemArray:
+ return "Array"
+	case itemArrayEnd:
+		return "ArrayEnd"
+	case itemArrayTableStart:
+		return "ArrayTableStart"
+	case itemArrayTableEnd:
+		return "ArrayTableEnd"
+	case itemCommentStart:
+		return "CommentStart"
+	}
+ panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+ return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 000000000..6a82e84f6
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,493 @@
+package toml
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+type parser struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ lx *lexer
+
+ // A list of keys in the order that they appear in the TOML data.
+ ordered []Key
+
+ // the full key for the current hash in scope
+ context Key
+
+ // the base key name for everything except hashes
+ currentKey string
+
+ // rough approximation of line number
+ approxLine int
+
+ // A map of 'key.group.names' to whether they were created implicitly.
+ implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+ return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(parseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ p.approxLine, p.current(), fmt.Sprintf(format, v...))
+ panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart:
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemTableEnd, kg.typ)
+
+ p.establishContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemArrayTableEnd, kg.typ)
+
+ p.establishContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart:
+ kname := p.next()
+ p.approxLine = kname.line
+ p.currentKey = p.keyString(kname)
+
+ val, typ := p.value(p.next())
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// keyString returns the string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ panic("unreachable")
+ }
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+ switch it.typ {
+ case itemString:
+ return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ case itemMultilineString:
+ trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+ return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ case itemRawString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemRawMultilineString:
+ return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemBool:
+ switch it.val {
+ case "true":
+ return true, p.typeOfPrimitive(it)
+ case "false":
+ return false, p.typeOfPrimitive(it)
+ }
+ p.bug("Expected boolean value, but got '%s'.", it.val)
+ case itemInteger:
+ num, err := strconv.ParseInt(it.val, 10, 64)
+ if err != nil {
+ // See comment below for floats describing why we make a
+ // distinction between a bug and a user error.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Integer '%s' is out of the range of 64-bit "+
+ "signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemFloat:
+ num, err := strconv.ParseFloat(it.val, 64)
+ if err != nil {
+ // Distinguish float values. Normally, it'd be a bug if the lexer
+ // provides an invalid float, but it's possible that the float is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ //
+ // This is also true for integers.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Float '%s' is out of the range of 64-bit "+
+ "IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.bug("Expected float value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemDatetime:
+ t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
+ if err != nil {
+ p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+ case itemArray:
+ array := make([]interface{}, 0)
+ types := make([]tomlType, 0)
+
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, p.typeOfArray(types)
+ }
+ p.bug("Unexpected value type: %s", it.typ)
+ panic("unreachable")
+}
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+	// We only need implicit hashes for the parent keys, key[0:len(key)-1].
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 5)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as "+
+ "an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
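+
+// As a sketch of the implicit-hash behavior: given the TOML
+//
+//	[a.b.c]
+//	answer = 42
+//
+// establishContext creates the hashes `a` and `a.b` implicitly before
+// creating `a.b.c` explicitly, and records `a` and `a.b` in p.implicits so
+// that a later explicit `[a]` table remains legal.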
+
+// setValue sets the given key to the given value in the current context.
+// It makes sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var tmpHash interface{}
+ var ok bool
+
+ hash := p.mapping
+ keyContext := make(Key, 0)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is a table of hashes. Pick the most recent table
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.bug("Expected hash to have type 'map[string]interface{}', but "+
+ "it has '%T' instead.", tmpHash)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Typically, if the given key has already been set, then we have
+ // to raise an error since duplicate keys are disallowed. However,
+ // it's possible that a key was previously defined implicitly. In this
+ // case, it is allowed to be redefined concretely. (See the
+ // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ //
+		// But we have to make sure to stop marking it as implicit. (So that
+		// another redefinition provokes an error.)
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ hash[key] = value
+}
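+
+// For example, the following document (mirroring the toml-test case cited
+// above) is accepted, because `a` only ever existed implicitly when the
+// explicit `[a]` table arrives:
+//
+//	[a.b.c]
+//	answer = 42
+//	[a]
+//	better = 43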
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+ keyContext := make(Key, 0, len(p.context)+1)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ }
+ if len(key) > 0 { // allow type setting for hashes
+ keyContext = append(keyContext, key)
+ }
+ p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+ p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+ p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+ return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+ if len(s) == 0 || s[0] != '\n' {
+ return s
+ }
+ return s[1:]
+}
+
+func stripEscapedWhitespace(s string) string {
+ esc := strings.Split(s, "\\\n")
+ if len(esc) > 1 {
+ for i := 1; i < len(esc); i++ {
+ esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+ }
+ }
+ return strings.Join(esc, "")
+}
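+
+// For example, stripEscapedWhitespace("a \\\n   b") returns "a b": the
+// backslash-newline pair and the leading whitespace of the continuation line
+// are removed, per TOML's line-ending-backslash rule for multiline strings.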
+
+func (p *parser) replaceEscapes(str string) string {
+ var replaced []rune
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ return ""
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ replaced = append(replaced, escaped)
+ r += 9
+ }
+ }
+ return string(replaced)
+}
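+
+// For example, replaceEscapes(`caf\u00E9`) returns "café": the lexer has
+// already validated that exactly four hex digits follow `\u`, so
+// asciiEscapeToUnicode can decode them straight to the rune U+00E9.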
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+ s := string(bs)
+ hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+ if err != nil {
+ p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+ "lexer claims it's OK: %s", s, err)
+ }
+ if !utf8.ValidRune(rune(hex)) {
+ p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ }
+ return rune(hex)
+}
+
+func isStringType(ty itemType) bool {
+ return ty == itemString || ty == itemMultilineString ||
+ ty == itemRawString || ty == itemRawMultilineString
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 000000000..c73f8afc1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, the spec seems to be
+// moving toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString:
+ return tomlString
+ case itemMultilineString:
+ return tomlString
+ case itemRawString:
+ return tomlString
+ case itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+ // Empty arrays are cool.
+ if len(types) == 0 {
+ return tomlArray
+ }
+
+ theType := types[0]
+ for _, t := range types[1:] {
+ if !typeEqual(theType, t) {
+ p.panicf("Array contains values of type '%s' and '%s', but "+
+ "arrays must be homogeneous.", theType, t)
+ }
+ }
+ return tomlArray
+}
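+
+// For example, the element types lexed from `[1, 2, 3]` are all Integer, so
+// the array's type is simply "Array"; for `[1, "two"]`, typeOfArray panics
+// through p.panicf with a mixed-type error.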
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 000000000..6da608af4
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,241 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string // the name of the field (`toml` tag included)
+ tag bool // whether field has a `toml` tag
+ index []int // represents the depth of an anonymous field
+ typ reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ name, _ := getOptions(sf.Tag.Get("toml"))
+ if name == "-" {
+ continue
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, field{name, tagged, index, ft})
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ f := field{name: ft.Name(), index: index, typ: ft}
+ next = append(next, f)
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with TOML tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
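+
+// As an illustrative sketch of the dominance rules above (hypothetical
+// types):
+//
+//	type Base struct{ ID string }
+//	type Child struct {
+//		Base      // contributes a promoted "ID" at depth 1
+//		ID string // depth 0; the shorter index sequence wins
+//	}
+//
+// typeFields(reflect.TypeOf(Child{})) resolves the name collision in favor
+// of Child.ID and drops the promoted Base.ID.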
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 000000000..b8b569d77
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
new file mode 100644
index 000000000..568001057
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -0,0 +1,22 @@
+# go-winio
+
+This repository contains utilities for efficiently performing Win32 IO operations in
+Go. Currently, this is focused on accessing named pipes and other file handles, and
+on using named pipes as a net transport.
+
+This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
+to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
+newer operating systems. This is similar to the implementation of network sockets in Go's net
+package.
+
+Please see the LICENSE file for licensing information.
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
+
+Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
+for another named pipe implementation.
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go
new file mode 100644
index 000000000..0378401c0
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go
@@ -0,0 +1,344 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+// http://www.gnu.org/software/tar/manual/html_node/Standard.html
+// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "time"
+)
+
+const (
+ blockSize = 512
+
+ // Types
+ TypeReg = '0' // regular file
+ TypeRegA = '\x00' // regular file
+ TypeLink = '1' // hard link
+ TypeSymlink = '2' // symbolic link
+ TypeChar = '3' // character device node
+ TypeBlock = '4' // block device node
+ TypeDir = '5' // directory
+ TypeFifo = '6' // fifo node
+ TypeCont = '7' // reserved
+ TypeXHeader = 'x' // extended header
+ TypeXGlobalHeader = 'g' // global extended header
+ TypeGNULongName = 'L' // Next file has a long name
+ TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
+ TypeGNUSparse = 'S' // sparse file
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+ Name string // name of header file entry
+ Mode int64 // permission and mode bits
+ Uid int // user id of owner
+ Gid int // group id of owner
+ Size int64 // length in bytes
+ ModTime time.Time // modified time
+ Typeflag byte // type of header entry
+ Linkname string // target name of link
+ Uname string // user name of owner
+ Gname string // group name of owner
+ Devmajor int64 // major number of character or block device
+ Devminor int64 // minor number of character or block device
+ AccessTime time.Time // access time
+ ChangeTime time.Time // status change time
+ CreationTime time.Time // creation time
+ Xattrs map[string]string
+ Winheaders map[string]string
+}
+
+// File name constants from the tar spec.
+const (
+ fileNameSize = 100 // Maximum number of bytes in a standard tar name.
+ fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
+)
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+ h *Header
+}
+
+func (fi headerFileInfo) Size() int64 { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{} { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+ if fi.IsDir() {
+ return path.Base(path.Clean(fi.h.Name))
+ }
+ return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+ // Set file permission bits.
+ mode = os.FileMode(fi.h.Mode).Perm()
+
+ // Set setuid, setgid and sticky bits.
+ if fi.h.Mode&c_ISUID != 0 {
+ // setuid
+ mode |= os.ModeSetuid
+ }
+ if fi.h.Mode&c_ISGID != 0 {
+ // setgid
+ mode |= os.ModeSetgid
+ }
+ if fi.h.Mode&c_ISVTX != 0 {
+ // sticky
+ mode |= os.ModeSticky
+ }
+
+ // Set file mode bits.
+ // clear perm, setuid, setgid and sticky bits.
+ m := os.FileMode(fi.h.Mode) &^ 07777
+ if m == c_ISDIR {
+ // directory
+ mode |= os.ModeDir
+ }
+ if m == c_ISFIFO {
+ // named pipe (FIFO)
+ mode |= os.ModeNamedPipe
+ }
+ if m == c_ISLNK {
+ // symbolic link
+ mode |= os.ModeSymlink
+ }
+ if m == c_ISBLK {
+ // device file
+ mode |= os.ModeDevice
+ }
+ if m == c_ISCHR {
+ // Unix character device
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ }
+ if m == c_ISSOCK {
+ // Unix domain socket
+ mode |= os.ModeSocket
+ }
+
+ switch fi.h.Typeflag {
+ case TypeSymlink:
+ // symbolic link
+ mode |= os.ModeSymlink
+ case TypeChar:
+ // character device node
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ case TypeBlock:
+ // block device node
+ mode |= os.ModeDevice
+ case TypeDir:
+ // directory
+ mode |= os.ModeDir
+ case TypeFifo:
+ // fifo node
+ mode |= os.ModeNamedPipe
+ }
+
+ return mode
+}
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+// Mode constants from the tar spec.
+const (
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+ c_ISDIR = 040000 // Directory
+ c_ISFIFO = 010000 // FIFO
+ c_ISREG = 0100000 // Regular file
+ c_ISLNK = 0120000 // Symbolic link
+ c_ISBLK = 060000 // Block special file
+ c_ISCHR = 020000 // Character special file
+ c_ISSOCK = 0140000 // Socket
+)
+
+// Keywords for the PAX Extended Header
+const (
+ paxAtime = "atime"
+ paxCharset = "charset"
+ paxComment = "comment"
+ paxCtime = "ctime" // please note that ctime is not a valid pax header.
+ paxCreationTime = "LIBARCHIVE.creationtime"
+ paxGid = "gid"
+ paxGname = "gname"
+ paxLinkpath = "linkpath"
+ paxMtime = "mtime"
+ paxPath = "path"
+ paxSize = "size"
+ paxUid = "uid"
+ paxUname = "uname"
+ paxXattr = "SCHILY.xattr."
+ paxWindows = "MSWINDOWS."
+ paxNone = ""
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+ if fi == nil {
+ return nil, errors.New("tar: FileInfo is nil")
+ }
+ fm := fi.Mode()
+ h := &Header{
+ Name: fi.Name(),
+ ModTime: fi.ModTime(),
+ Mode: int64(fm.Perm()), // or'd with c_IS* constants later
+ }
+ switch {
+ case fm.IsRegular():
+ h.Mode |= c_ISREG
+ h.Typeflag = TypeReg
+ h.Size = fi.Size()
+ case fi.IsDir():
+ h.Typeflag = TypeDir
+ h.Mode |= c_ISDIR
+ h.Name += "/"
+ case fm&os.ModeSymlink != 0:
+ h.Typeflag = TypeSymlink
+ h.Mode |= c_ISLNK
+ h.Linkname = link
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ h.Mode |= c_ISCHR
+ h.Typeflag = TypeChar
+ } else {
+ h.Mode |= c_ISBLK
+ h.Typeflag = TypeBlock
+ }
+ case fm&os.ModeNamedPipe != 0:
+ h.Typeflag = TypeFifo
+ h.Mode |= c_ISFIFO
+ case fm&os.ModeSocket != 0:
+ h.Mode |= c_ISSOCK
+ default:
+ return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+ }
+ if fm&os.ModeSetuid != 0 {
+ h.Mode |= c_ISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ h.Mode |= c_ISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ h.Mode |= c_ISVTX
+ }
+ // If possible, populate additional fields from OS-specific
+ // FileInfo fields.
+ if sys, ok := fi.Sys().(*Header); ok {
+ // This FileInfo came from a Header (not the OS). Use the
+ // original Header to populate all remaining fields.
+ h.Uid = sys.Uid
+ h.Gid = sys.Gid
+ h.Uname = sys.Uname
+ h.Gname = sys.Gname
+ h.AccessTime = sys.AccessTime
+ h.ChangeTime = sys.ChangeTime
+ if sys.Xattrs != nil {
+ h.Xattrs = make(map[string]string)
+ for k, v := range sys.Xattrs {
+ h.Xattrs[k] = v
+ }
+ }
+ if sys.Typeflag == TypeLink {
+ // hard link
+ h.Typeflag = TypeLink
+ h.Size = 0
+ h.Linkname = sys.Linkname
+ }
+ }
+ if sysStat != nil {
+ return h, sysStat(fi, h)
+ }
+ return h, nil
+}
+
+var zeroBlock = make([]byte, blockSize)
+
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
+// We compute and return both.
+func checksum(header []byte) (unsigned int64, signed int64) {
+ for i := 0; i < len(header); i++ {
+ if i == 148 {
+ // The chksum field (header[148:156]) is special: it should be treated as space bytes.
+ unsigned += ' ' * 8
+ signed += ' ' * 8
+ i += 7
+ continue
+ }
+ unsigned += int64(header[i])
+ signed += int64(int8(header[i]))
+ }
+ return
+}
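+
+// For example (an illustrative sketch, mirroring how verifyChecksum in
+// reader.go uses this function):
+//
+//	unsigned, signed := checksum(header)
+//	given := p.parseOctal(header[148:156])
+//	ok := given == unsigned || given == signed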
+
+type slicer []byte
+
+func (sp *slicer) next(n int) (b []byte) {
+ s := *sp
+ b, *sp = s[0:n], s[n:]
+ return
+}
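+
+// For example (illustrative only):
+//
+//	s := slicer([]byte("abcdef"))
+//	_ = s.next(2) // yields "ab"
+//	_ = s.next(3) // yields "cde", leaving s as "f"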
+
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c >= 0x80 {
+ return false
+ }
+ }
+ return true
+}
+
+func toASCII(s string) string {
+ if isASCII(s) {
+ return s
+ }
+ var buf bytes.Buffer
+ for _, c := range s {
+ if c < 0x80 {
+ buf.WriteByte(byte(c))
+ }
+ }
+ return buf.String()
+}
+
+// isHeaderOnlyType checks if the given type flag is of the type that has no
+// data section even if a size is specified.
+func isHeaderOnlyType(flag byte) bool {
+ switch flag {
+ case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
new file mode 100644
index 000000000..e210c618a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
@@ -0,0 +1,1002 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - pax extensions
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrHeader = errors.New("archive/tar: invalid tar header")
+)
+
+const maxNanoSecondIntSize = 9
+
+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+ r io.Reader
+ err error
+ pad int64 // amount of padding (ignored) after current file entry
+ curr numBytesReader // reader for current file entry
+ hdrBuff [blockSize]byte // buffer to use in readHeader
+}
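+
+// As an illustrative sketch (not part of the upstream source), a typical
+// read loop over an archive looks like this, where f is any io.Reader:
+//
+//	tr := NewReader(f)
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break // End of archive
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		if _, err := io.Copy(ioutil.Discard, tr); err != nil {
+//			return err
+//		}
+//	}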
+
+type parser struct {
+ err error // Last error seen
+}
+
+// A numBytesReader is an io.Reader with a numBytes method, returning the number
+// of bytes remaining in the underlying encoded data.
+type numBytesReader interface {
+ io.Reader
+ numBytes() int64
+}
+
+// A regFileReader is a numBytesReader for reading file data from a tar archive.
+type regFileReader struct {
+ r io.Reader // underlying reader
+ nb int64 // number of unread bytes for current file entry
+}
+
+// A sparseFileReader is a numBytesReader for reading sparse file data from a
+// tar archive.
+type sparseFileReader struct {
+ rfr numBytesReader // Reads the sparse-encoded file data
+ sp []sparseEntry // The sparse map for the file
+ pos int64 // Keeps track of file position
+ total int64 // Total size of the file
+}
+
+// A sparseEntry holds a single entry in a sparse file's sparse map.
+//
+// Sparse files are represented using a series of sparseEntrys.
+// Despite the name, a sparseEntry represents an actual data fragment that
+// references data found in the underlying archive stream. All regions not
+// covered by a sparseEntry are logically filled with zeros.
+//
+// For example, if the underlying raw file contains the 8-byte data:
+// var compactData = "abcdefgh"
+//
+// And the sparse map has the following entries:
+// var sp = []sparseEntry{
+// {offset: 2, numBytes: 5} // Data fragment for bytes [2..7)
+// {offset: 18, numBytes: 3} // Data fragment for bytes [18..21)
+// }
+//
+// Then the content of the resulting sparse file with a "real" size of 25 is:
+// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type sparseEntry struct {
+ offset int64 // Starting position of the fragment
+ numBytes int64 // Length of the fragment
+}
+
+// Keywords for GNU sparse files in a PAX extended header
+const (
+ paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+ paxGNUSparseOffset = "GNU.sparse.offset"
+ paxGNUSparseNumBytes = "GNU.sparse.numbytes"
+ paxGNUSparseMap = "GNU.sparse.map"
+ paxGNUSparseName = "GNU.sparse.name"
+ paxGNUSparseMajor = "GNU.sparse.major"
+ paxGNUSparseMinor = "GNU.sparse.minor"
+ paxGNUSparseSize = "GNU.sparse.size"
+ paxGNUSparseRealSize = "GNU.sparse.realsize"
+)
+
+// Keywords for old GNU sparse headers
+const (
+ oldGNUSparseMainHeaderOffset = 386
+ oldGNUSparseMainHeaderIsExtendedOffset = 482
+ oldGNUSparseMainHeaderNumEntries = 4
+ oldGNUSparseExtendedHeaderIsExtendedOffset = 504
+ oldGNUSparseExtendedHeaderNumEntries = 21
+ oldGNUSparseOffsetSize = 12
+ oldGNUSparseNumBytesSize = 12
+)
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+ if tr.err != nil {
+ return nil, tr.err
+ }
+
+ var hdr *Header
+ var extHdrs map[string]string
+
+ // Externally, Next iterates through the tar archive as if it is a series of
+ // files. Internally, the tar format often uses fake "files" to add
+ // metadata that describes the next file. These metadata "files" should not
+ // normally be visible to the outside. As such, this loop iterates through
+ // one or more "header files" until it finds a "normal file".
+loop:
+ for {
+ tr.err = tr.skipUnread()
+ if tr.err != nil {
+ return nil, tr.err
+ }
+
+ hdr = tr.readHeader()
+ if tr.err != nil {
+ return nil, tr.err
+ }
+
+ // Check for PAX/GNU special headers and files.
+ switch hdr.Typeflag {
+ case TypeXHeader:
+ extHdrs, tr.err = parsePAX(tr)
+ if tr.err != nil {
+ return nil, tr.err
+ }
+ continue loop // This is a meta header affecting the next header
+ case TypeGNULongName, TypeGNULongLink:
+ var realname []byte
+ realname, tr.err = ioutil.ReadAll(tr)
+ if tr.err != nil {
+ return nil, tr.err
+ }
+
+ // Convert GNU extensions to use PAX headers.
+ if extHdrs == nil {
+ extHdrs = make(map[string]string)
+ }
+ var p parser
+ switch hdr.Typeflag {
+ case TypeGNULongName:
+ extHdrs[paxPath] = p.parseString(realname)
+ case TypeGNULongLink:
+ extHdrs[paxLinkpath] = p.parseString(realname)
+ }
+ if p.err != nil {
+ tr.err = p.err
+ return nil, tr.err
+ }
+ continue loop // This is a meta header affecting the next header
+ default:
+ mergePAX(hdr, extHdrs)
+
+ // Check for a PAX format sparse file
+ sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
+ if err != nil {
+ tr.err = err
+ return nil, err
+ }
+ if sp != nil {
+ // Current file is a PAX format GNU sparse file.
+ // Set the current file reader to a sparse file reader.
+ tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
+ if tr.err != nil {
+ return nil, tr.err
+ }
+ }
+ break loop // This is a file, so stop
+ }
+ }
+ return hdr, nil
+}
+
+// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
+// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
+// be treated as a regular file.
+func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
+ var sparseFormat string
+
+ // Check for sparse format indicators
+ major, majorOk := headers[paxGNUSparseMajor]
+ minor, minorOk := headers[paxGNUSparseMinor]
+ sparseName, sparseNameOk := headers[paxGNUSparseName]
+ _, sparseMapOk := headers[paxGNUSparseMap]
+ sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
+ sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
+
+ // Identify which, if any, sparse format applies from which PAX headers are set
+ if majorOk && minorOk {
+ sparseFormat = major + "." + minor
+ } else if sparseNameOk && sparseMapOk {
+ sparseFormat = "0.1"
+ } else if sparseSizeOk {
+ sparseFormat = "0.0"
+ } else {
+ // Not a PAX format GNU sparse file.
+ return nil, nil
+ }
+
+ // Check for unknown sparse format
+ if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
+ return nil, nil
+ }
+
+ // Update hdr from GNU sparse PAX headers
+ if sparseNameOk {
+ hdr.Name = sparseName
+ }
+ if sparseSizeOk {
+ realSize, err := strconv.ParseInt(sparseSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ } else if sparseRealSizeOk {
+ realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ }
+
+ // Set up the sparse map, according to the particular sparse format in use
+ var sp []sparseEntry
+ var err error
+ switch sparseFormat {
+ case "0.0", "0.1":
+ sp, err = readGNUSparseMap0x1(headers)
+ case "1.0":
+ sp, err = readGNUSparseMap1x0(tr.curr)
+ }
+ return sp, err
+}
+
+// mergePAX merges well-known headers according to the PAX standard.
+// In general, PAX records with the same name as fields in the Header
+// struct overwrite those fields, since they carry higher precision or
+// longer values. This is especially useful for the name and linkname
+// fields.
+func mergePAX(hdr *Header, headers map[string]string) error {
+ for k, v := range headers {
+ switch k {
+ case paxPath:
+ hdr.Name = v
+ case paxLinkpath:
+ hdr.Linkname = v
+ case paxGname:
+ hdr.Gname = v
+ case paxUname:
+ hdr.Uname = v
+ case paxUid:
+ uid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = int(uid)
+ case paxGid:
+ gid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = int(gid)
+ case paxAtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.AccessTime = t
+ case paxMtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ModTime = t
+ case paxCtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ChangeTime = t
+ case paxCreationTime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.CreationTime = t
+ case paxSize:
+ size, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Size = int64(size)
+ default:
+ if strings.HasPrefix(k, paxXattr) {
+ if hdr.Xattrs == nil {
+ hdr.Xattrs = make(map[string]string)
+ }
+ hdr.Xattrs[k[len(paxXattr):]] = v
+ } else if strings.HasPrefix(k, paxWindows) {
+ if hdr.Winheaders == nil {
+ hdr.Winheaders = make(map[string]string)
+ }
+ hdr.Winheaders[k[len(paxWindows):]] = v
+ }
+ }
+ }
+ return nil
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in
+// the PAX specification.
+func parsePAXTime(t string) (time.Time, error) {
+ buf := []byte(t)
+ pos := bytes.IndexByte(buf, '.')
+ var seconds, nanoseconds int64
+ var err error
+ if pos == -1 {
+ seconds, err = strconv.ParseInt(t, 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ } else {
+ seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nanoBuf := string(buf[pos+1:])
+ // Pad as needed before converting to a decimal.
+ // For example .030 -> .030000000 -> 30000000 nanoseconds
+ if len(nanoBuf) < maxNanoSecondIntSize {
+ // Right pad
+ nanoBuf += strings.Repeat("0", maxNanoSecondIntSize-len(nanoBuf))
+ } else if len(nanoBuf) > maxNanoSecondIntSize {
+ // Right truncate
+ nanoBuf = nanoBuf[:maxNanoSecondIntSize]
+ }
+ nanoseconds, err = strconv.ParseInt(nanoBuf, 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ }
+ ts := time.Unix(seconds, nanoseconds)
+ return ts, nil
+}
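+
+// For example (illustrative only), "1350244992.023960108" parses to
+// time.Unix(1350244992, 23960108), and "1350244992.5" right-pads the
+// fraction to "500000000", i.e. half a second in nanoseconds.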
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
+func parsePAX(r io.Reader) (map[string]string, error) {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ sbuf := string(buf)
+
+ // For GNU PAX sparse format 0.0 support.
+ // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
+ var sparseMap bytes.Buffer
+
+ headers := make(map[string]string)
+ // Each record is constructed as
+ // "%d %s=%s\n", length, keyword, value
+ for len(sbuf) > 0 {
+ key, value, residual, err := parsePAXRecord(sbuf)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sbuf = residual
+
+ keyStr := string(key)
+ if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
+ // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
+ sparseMap.WriteString(value)
+ sparseMap.Write([]byte{','})
+ } else {
+ // Normal key. Set the value in the headers map.
+ headers[keyStr] = string(value)
+ }
+ }
+ if sparseMap.Len() != 0 {
+ // Add sparse info to headers, chopping off the extra comma
+ sparseMap.Truncate(sparseMap.Len() - 1)
+ headers[paxGNUSparseMap] = sparseMap.String()
+ }
+ return headers, nil
+}
+
+// parsePAXRecord parses the input PAX record string into a key-value pair.
+// If parsing is successful, it will slice off the currently read record and
+// return the remainder as r.
+//
+// A PAX record is of the following form:
+// "%d %s=%s\n" % (size, key, value)
+func parsePAXRecord(s string) (k, v, r string, err error) {
+ // The size field ends at the first space.
+ sp := strings.IndexByte(s, ' ')
+ if sp == -1 {
+ return "", "", s, ErrHeader
+ }
+
+ // Parse the first token as a decimal integer.
+ n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
+ if perr != nil || n < 5 || int64(len(s)) < n {
+ return "", "", s, ErrHeader
+ }
+
+ // Extract everything between the space and the final newline.
+ rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
+ if nl != "\n" {
+ return "", "", s, ErrHeader
+ }
+
+ // The first equals separates the key from the value.
+ eq := strings.IndexByte(rec, '=')
+ if eq == -1 {
+ return "", "", s, ErrHeader
+ }
+ return rec[:eq], rec[eq+1:], rem, nil
+}
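+
+// For example (illustrative only), the 30-byte record
+//
+//	"30 mtime=1350244992.023960108\n"
+//
+// yields the key "mtime", the value "1350244992.023960108", and whatever
+// input follows it as the remainder.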
+
+// parseString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func (*parser) parseString(b []byte) string {
+ n := 0
+ for n < len(b) && b[n] != 0 {
+ n++
+ }
+ return string(b[0:n])
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+ // Check for base-256 (binary) format first.
+ // If the first bit is set, then all following bits constitute a two's
+ // complement encoded number in big-endian byte order.
+ if len(b) > 0 && b[0]&0x80 != 0 {
+ // Handling negative numbers relies on the following identity:
+ // -a-1 == ^a
+ //
+ // If the number is negative, we use an inversion mask to invert the
+ // data bytes and treat the value as an unsigned number.
+ var inv byte // 0x00 if positive or zero, 0xff if negative
+ if b[0]&0x40 != 0 {
+ inv = 0xff
+ }
+
+ var x uint64
+ for i, c := range b {
+ c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+ if i == 0 {
+ c &= 0x7f // Ignore sign bit in first byte
+ }
+ if (x >> 56) > 0 {
+ p.err = ErrHeader // Integer overflow
+ return 0
+ }
+ x = x<<8 | uint64(c)
+ }
+ if (x >> 63) > 0 {
+ p.err = ErrHeader // Integer overflow
+ return 0
+ }
+ if inv == 0xff {
+ return ^int64(x)
+ }
+ return int64(x)
+ }
+
+ // Normal case is base-8 (octal) format.
+ return p.parseOctal(b)
+}
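+
+// For example (illustrative only), the 8-byte base-256 field
+// {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} decodes to 1, while
+// {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} decodes to -1.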
+
+func (p *parser) parseOctal(b []byte) int64 {
+ // Because unused fields are filled with NULs, we need
+ // to skip leading NULs. Fields may also be padded with
+ // spaces or NULs.
+ // So we remove leading and trailing NULs and spaces to
+ // be sure.
+ b = bytes.Trim(b, " \x00")
+
+ if len(b) == 0 {
+ return 0
+ }
+ x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+ if perr != nil {
+ p.err = ErrHeader
+ }
+ return int64(x)
+}
+
+// skipUnread skips any unread bytes in the existing file entry, as well as any
+// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
+// encountered in the data portion; it is okay to hit io.EOF in the padding.
+//
+// Note that this function still works properly even when sparse files are being
+// used since numBytes returns the bytes remaining in the underlying io.Reader.
+func (tr *Reader) skipUnread() error {
+ dataSkip := tr.numBytes() // Number of data bytes to skip
+ totalSkip := dataSkip + tr.pad // Total number of bytes to skip
+ tr.curr, tr.pad = nil, 0
+
+ // If possible, Seek to the last byte before the end of the data section.
+ // Do this because Seek is often lazy about reporting errors; this will mask
+ // the fact that the tar stream may be truncated. We can rely on the
+ // io.CopyN done shortly afterwards to trigger any IO errors.
+ var seekSkipped int64 // Number of bytes skipped via Seek
+ if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
+ // Not all io.Seeker can actually Seek. For example, os.Stdin implements
+ // io.Seeker, but calling Seek always returns an error and performs
+ // no action. Thus, we try an innocent seek to the current position
+ // to see if Seek is really supported.
+ pos1, err := sr.Seek(0, os.SEEK_CUR)
+ if err == nil {
+ // Seek seems supported, so perform the real Seek.
+ pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
+ if err != nil {
+ tr.err = err
+ return tr.err
+ }
+ seekSkipped = pos2 - pos1
+ }
+ }
+
+ var copySkipped int64 // Number of bytes skipped via CopyN
+ copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
+ if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
+ tr.err = io.ErrUnexpectedEOF
+ }
+ return tr.err
+}
+
+func (tr *Reader) verifyChecksum(header []byte) bool {
+ if tr.err != nil {
+ return false
+ }
+
+ var p parser
+ given := p.parseOctal(header[148:156])
+ unsigned, signed := checksum(header)
+ return p.err == nil && (given == unsigned || given == signed)
+}
+
+// readHeader reads the next block header and assumes that the underlying reader
+// is already aligned to a block boundary.
+//
+// The err will be set to io.EOF only when one of the following occurs:
+// * Exactly 0 bytes are read and EOF is hit.
+// * Exactly 1 block of zeros is read and EOF is hit.
+// * At least 2 blocks of zeros are read.
+func (tr *Reader) readHeader() *Header {
+ header := tr.hdrBuff[:]
+ copy(header, zeroBlock)
+
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil // io.EOF is okay here
+ }
+
+ // Two blocks of zero bytes marks the end of the archive.
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil // io.EOF is okay here
+ }
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ tr.err = io.EOF
+ } else {
+ tr.err = ErrHeader // zero block and then non-zero block
+ }
+ return nil
+ }
+
+ if !tr.verifyChecksum(header) {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Unpack
+ var p parser
+ hdr := new(Header)
+ s := slicer(header)
+
+ hdr.Name = p.parseString(s.next(100))
+ hdr.Mode = p.parseNumeric(s.next(8))
+ hdr.Uid = int(p.parseNumeric(s.next(8)))
+ hdr.Gid = int(p.parseNumeric(s.next(8)))
+ hdr.Size = p.parseNumeric(s.next(12))
+ hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+ s.next(8) // chksum
+ hdr.Typeflag = s.next(1)[0]
+ hdr.Linkname = p.parseString(s.next(100))
+
+ // The remainder of the header depends on the value of magic.
+ // The original (v7) version of tar had no explicit magic field,
+ // so its magic bytes, like the rest of the block, are NULs.
+ magic := string(s.next(8)) // contains version field as well.
+ var format string
+ switch {
+ case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
+ if string(header[508:512]) == "tar\x00" {
+ format = "star"
+ } else {
+ format = "posix"
+ }
+ case magic == "ustar \x00": // old GNU tar
+ format = "gnu"
+ }
+
+ switch format {
+ case "posix", "gnu", "star":
+ hdr.Uname = p.parseString(s.next(32))
+ hdr.Gname = p.parseString(s.next(32))
+ devmajor := s.next(8)
+ devminor := s.next(8)
+ if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
+ hdr.Devmajor = p.parseNumeric(devmajor)
+ hdr.Devminor = p.parseNumeric(devminor)
+ }
+ var prefix string
+ switch format {
+ case "posix", "gnu":
+ prefix = p.parseString(s.next(155))
+ case "star":
+ prefix = p.parseString(s.next(131))
+ hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+ hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+ }
+ if len(prefix) > 0 {
+ hdr.Name = prefix + "/" + hdr.Name
+ }
+ }
+
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+
+ nb := hdr.Size
+ if isHeaderOnlyType(hdr.Typeflag) {
+ nb = 0
+ }
+ if nb < 0 {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Set the current file reader.
+ tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+ tr.curr = &regFileReader{r: tr.r, nb: nb}
+
+ // Check for old GNU sparse format entry.
+ if hdr.Typeflag == TypeGNUSparse {
+ // Get the real size of the file.
+ hdr.Size = p.parseNumeric(header[483:495])
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+
+ // Read the sparse map.
+ sp := tr.readOldGNUSparseMap(header)
+ if tr.err != nil {
+ return nil
+ }
+
+ // Current file is a GNU sparse file. Update the current file reader.
+ tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
+ if tr.err != nil {
+ return nil
+ }
+ }
+
+ return hdr
+}
+
+// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
+// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
+// then one or more extension headers are used to store the rest of the sparse map.
+func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
+ var p parser
+ isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
+ spCap := oldGNUSparseMainHeaderNumEntries
+ if isExtended {
+ spCap += oldGNUSparseExtendedHeaderNumEntries
+ }
+ sp := make([]sparseEntry, 0, spCap)
+ s := slicer(header[oldGNUSparseMainHeaderOffset:])
+
+ // Read the four entries from the main tar header
+ for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
+ offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
+ numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ for isExtended {
+ // There are more entries. Read an extension header and parse its entries.
+ sparseHeader := make([]byte, blockSize)
+ if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
+ return nil
+ }
+ isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
+ s = slicer(sparseHeader)
+ for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
+ offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
+ numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ }
+ return sp
+}
+
+// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
+// version 1.0. The format of the sparse map consists of a series of
+// newline-terminated numeric fields. The first field is the number of entries
+// and is always present. Following this are the entries, consisting of two
+// fields (offset, numBytes). This function must stop reading at the end
+// boundary of the block containing the last newline.
+//
+// Note that the GNU manual says that numeric values should be encoded in octal
+// format. However, the GNU tar utility itself outputs these values in decimal.
+// As such, this library treats values as being encoded in decimal.
+func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
+ var cntNewline int64
+ var buf bytes.Buffer
+ var blk = make([]byte, blockSize)
+
+ // feedTokens copies data in numBlock chunks from r into buf until there are
+ // at least cnt newlines in buf. It will not read more blocks than needed.
+ var feedTokens = func(cnt int64) error {
+ for cntNewline < cnt {
+ if _, err := io.ReadFull(r, blk); err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ buf.Write(blk)
+ for _, c := range blk {
+ if c == '\n' {
+ cntNewline++
+ }
+ }
+ }
+ return nil
+ }
+
+ // nextToken gets the next token delimited by a newline. This assumes that
+ // at least one newline exists in the buffer.
+ var nextToken = func() string {
+ cntNewline--
+ tok, _ := buf.ReadString('\n')
+ return tok[:len(tok)-1] // Cut off newline
+ }
+
+ // Parse for the number of entries.
+ // Use integer overflow resistant math to check this.
+ if err := feedTokens(1); err != nil {
+ return nil, err
+ }
+ numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
+ if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+ return nil, ErrHeader
+ }
+
+ // Parse for all member entries.
+ // numEntries is trusted after this since a potential attacker must have
+ // committed resources proportional to what this library used.
+ if err := feedTokens(2 * numEntries); err != nil {
+ return nil, err
+ }
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ offset, err := strconv.ParseInt(nextToken(), 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ return sp, nil
+}
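+
+// For example (illustrative only), a version 1.0 sparse map describing a
+// single 512-byte fragment at offset 0 is the newline-terminated sequence
+//
+//	"1\n0\n512\n"
+//
+// stored at the start of a 512-byte block and padded with NULs.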
+
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
+// version 0.1. The sparse map is stored in the PAX headers.
+func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
+ // Get number of entries.
+ // Use integer overflow resistant math to check this.
+ numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
+ numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
+ if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+ return nil, ErrHeader
+ }
+
+ // There should be two numbers in sparseMap for each entry.
+ sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
+ if int64(len(sparseMap)) != 2*numEntries {
+ return nil, ErrHeader
+ }
+
+ // Loop through the entries in the sparse map.
+ // numEntries is trusted now.
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ return sp, nil
+}
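+
+// For example (illustrative only), the PAX records
+//
+//	GNU.sparse.numblocks=2
+//	GNU.sparse.map=0,512,10240,512
+//
+// decode to the entries {offset: 0, numBytes: 512} and
+// {offset: 10240, numBytes: 512}.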
+
+// numBytes returns the number of bytes left to read in the current file's entry
+// in the tar archive, or 0 if there is no current file.
+func (tr *Reader) numBytes() int64 {
+ if tr.curr == nil {
+ // No current file, so no bytes
+ return 0
+ }
+ return tr.curr.numBytes()
+}
+
+// Read reads from the current entry in the tar archive.
+// It returns 0, io.EOF when it reaches the end of that entry,
+// until Next is called to advance to the next entry.
+//
+// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
+// the Header.Size claims.
+func (tr *Reader) Read(b []byte) (n int, err error) {
+ if tr.err != nil {
+ return 0, tr.err
+ }
+ if tr.curr == nil {
+ return 0, io.EOF
+ }
+
+ n, err = tr.curr.Read(b)
+ if err != nil && err != io.EOF {
+ tr.err = err
+ }
+ return
+}
+
+func (rfr *regFileReader) Read(b []byte) (n int, err error) {
+ if rfr.nb == 0 {
+ // file consumed
+ return 0, io.EOF
+ }
+ if int64(len(b)) > rfr.nb {
+ b = b[0:rfr.nb]
+ }
+ n, err = rfr.r.Read(b)
+ rfr.nb -= int64(n)
+
+ if err == io.EOF && rfr.nb > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// numBytes returns the number of bytes left to read in the file's data in the tar archive.
+func (rfr *regFileReader) numBytes() int64 {
+ return rfr.nb
+}
+
+// newSparseFileReader creates a new sparseFileReader, but validates all of the
+// sparse entries before doing so.
+func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
+ if total < 0 {
+ return nil, ErrHeader // Total size cannot be negative
+ }
+
+ // Validate all sparse entries. These are the same checks as performed by
+ // the BSD tar utility.
+ for i, s := range sp {
+ switch {
+ case s.offset < 0 || s.numBytes < 0:
+ return nil, ErrHeader // Negative values are never okay
+ case s.offset > math.MaxInt64-s.numBytes:
+ return nil, ErrHeader // Integer overflow with large length
+ case s.offset+s.numBytes > total:
+ return nil, ErrHeader // Region extends beyond the "real" size
+ case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
+ return nil, ErrHeader // Regions can't overlap and must be in order
+ }
+ }
+ return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
+}
+
+// readHole reads a sparse hole ending at endOffset.
+func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
+ n64 := endOffset - sfr.pos
+ if n64 > int64(len(b)) {
+ n64 = int64(len(b))
+ }
+ n := int(n64)
+ for i := 0; i < n; i++ {
+ b[i] = 0
+ }
+ sfr.pos += n64
+ return n
+}
+
+// Read reads the sparse file data in expanded form.
+func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
+ // Skip past all empty fragments.
+ for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
+ sfr.sp = sfr.sp[1:]
+ }
+
+ // If there are no more fragments, then it is possible that there
+ // is one last sparse hole.
+ if len(sfr.sp) == 0 {
+ // This behavior matches the BSD tar utility.
+ // However, GNU tar stops returning data even if sfr.total is unmet.
+ if sfr.pos < sfr.total {
+ return sfr.readHole(b, sfr.total), nil
+ }
+ return 0, io.EOF
+ }
+
+ // In front of a data fragment, so read a hole.
+ if sfr.pos < sfr.sp[0].offset {
+ return sfr.readHole(b, sfr.sp[0].offset), nil
+ }
+
+ // In a data fragment, so read from it.
+ // This math is overflow free since we verify that offset and numBytes can
+ // be safely added when creating the sparseFileReader.
+ endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
+ bytesLeft := endPos - sfr.pos // Bytes left in fragment
+ if int64(len(b)) > bytesLeft {
+ b = b[:bytesLeft]
+ }
+
+ n, err = sfr.rfr.Read(b)
+ sfr.pos += int64(n)
+ if err == io.EOF {
+ if sfr.pos < endPos {
+ err = io.ErrUnexpectedEOF // There was supposed to be more data
+ } else if sfr.pos < sfr.total {
+ err = nil // There is still an implicit sparse hole at the end
+ }
+ }
+
+ if sfr.pos == endPos {
+ sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
+ }
+ return n, err
+}
+
+// numBytes returns the number of bytes left to read in the sparse file's
+// sparse-encoded data in the tar archive.
+func (sfr *sparseFileReader) numBytes() int64 {
+ return sfr.rfr.numBytes()
+}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
new file mode 100644
index 000000000..cf9cc79c5
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atim.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctim.Unix())
+}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
new file mode 100644
index 000000000..6f17dbe30
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atimespec.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctimespec.Unix())
+}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
new file mode 100644
index 000000000..cb843db4c
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+ "os"
+ "syscall"
+)
+
+func init() {
+ sysStat = statUnix
+}
+
+func statUnix(fi os.FileInfo, h *Header) error {
+ sys, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+ h.Uid = int(sys.Uid)
+ h.Gid = int(sys.Gid)
+ // TODO(bradfitz): populate username & group. os/user
+ // doesn't cache LookupId lookups, and lacks group
+ // lookup functions.
+ h.AccessTime = statAtime(sys)
+ h.ChangeTime = statCtime(sys)
+ // TODO(bradfitz): major/minor device numbers?
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
new file mode 100644
index 000000000..30d7e606d
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
@@ -0,0 +1,444 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrWriteTooLong = errors.New("archive/tar: write too long")
+ ErrFieldTooLong = errors.New("archive/tar: header field too long")
+ ErrWriteAfterClose = errors.New("archive/tar: write after close")
+ errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
+)
+
+// A Writer provides sequential writing of a tar archive in POSIX.1 format.
+// A tar archive consists of a sequence of files.
+// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
+// writing at most hdr.Size bytes in total.
+type Writer struct {
+ w io.Writer
+ err error
+ nb int64 // number of unwritten bytes for current file entry
+ pad int64 // amount of padding to write after current file entry
+ closed bool
+ usedBinary bool // whether the binary numeric field extension was used
+ preferPax bool // use pax header instead of binary numeric header
+ hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
+ paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
+}
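+
+// As an illustrative sketch (not part of the upstream source), a typical
+// write sequence looks like this, where w is any io.Writer and data is the
+// file body:
+//
+//	tw := NewWriter(w)
+//	hdr := &Header{Name: "a.txt", Mode: 0600, Size: int64(len(data))}
+//	if err := tw.WriteHeader(hdr); err != nil {
+//		return err
+//	}
+//	if _, err := tw.Write(data); err != nil {
+//		return err
+//	}
+//	if err := tw.Close(); err != nil {
+//		return err
+//	}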
+
+type formatter struct {
+ err error // Last error seen
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+ if tw.nb > 0 {
+ tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+ return tw.err
+ }
+
+ n := tw.nb + tw.pad
+ for n > 0 && tw.err == nil {
+ nr := n
+ if nr > blockSize {
+ nr = blockSize
+ }
+ var nw int
+ nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+ n -= int64(nw)
+ }
+ tw.nb = 0
+ tw.pad = 0
+ return tw.err
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+func (f *formatter) formatString(b []byte, s string) {
+ if len(s) > len(b) {
+ f.err = ErrFieldTooLong
+ return
+ }
+ ascii := toASCII(s)
+ copy(b, ascii)
+ if len(ascii) < len(b) {
+ b[len(ascii)] = 0
+ }
+}
+
+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (f *formatter) formatOctal(b []byte, x int64) {
+ s := strconv.FormatInt(x, 8)
+ // leading zeros, but leave room for a NUL.
+ for len(s)+1 < len(b) {
+ s = "0" + s
+ }
+ f.formatString(b, s)
+}
+
+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode, which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+ var binBits = uint(n-1) * 8
+ return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// Write x into b, as binary (GNU tar/star extension).
+func (f *formatter) formatNumeric(b []byte, x int64) {
+ if fitsInBase256(len(b), x) {
+ for i := len(b) - 1; i >= 0; i-- {
+ b[i] = byte(x)
+ x >>= 8
+ }
+ b[0] |= 0x80 // Highest bit indicates binary format
+ return
+ }
+
+ f.formatOctal(b, 0) // Last resort, just write zero
+ f.err = ErrFieldTooLong
+}
+
+var (
+ minTime = time.Unix(0, 0)
+ // There is room for 11 octal digits (33 bits) of mtime.
+ maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+ return tw.writeHeader(hdr, true)
+}
+
+// writeHeader writes hdr and prepares to accept the file's contents.
+// It calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// This method is called internally by writePAXHeader with allowPax set to
+// false, which suppresses writing a second, recursive PAX header.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+ if tw.closed {
+ return ErrWriteAfterClose
+ }
+ if tw.err == nil {
+ tw.Flush()
+ }
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // a map to hold pax header records, if any are needed
+ paxHeaders := make(map[string]string)
+
+ // TODO(shanemhansen): we might want to use PAX headers for
+ // subsecond time resolution, but for now let's just capture
+ // too-long fields or non-ASCII characters
+
+ var f formatter
+ var header []byte
+
+ // We need to select which scratch buffer to use carefully,
+ // since this method is called recursively to write PAX headers.
+ // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+ // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+ // already being used by the non-recursive call, so we must use paxHdrBuff.
+ header = tw.hdrBuff[:]
+ if !allowPax {
+ header = tw.paxHdrBuff[:]
+ }
+ copy(header, zeroBlock)
+ s := slicer(header)
+
+ // Wrappers around formatter that automatically sets paxHeaders if the
+ // argument extends beyond the capacity of the input byte slice.
+ var formatString = func(b []byte, s string, paxKeyword string) {
+ needsPaxHeader := (paxKeyword != paxNone && len(s) > len(b)) || !isASCII(s)
+ if needsPaxHeader {
+ paxHeaders[paxKeyword] = s
+ return
+ }
+ f.formatString(b, s)
+ }
+ var formatNumeric = func(b []byte, x int64, paxKeyword string) {
+ // Try octal first.
+ s := strconv.FormatInt(x, 8)
+ if len(s) < len(b) {
+ f.formatOctal(b, x)
+ return
+ }
+
+ // If it is too long for octal, and PAX is preferred, use a PAX header.
+ if paxKeyword != paxNone && tw.preferPax {
+ f.formatOctal(b, 0)
+ s := strconv.FormatInt(x, 10)
+ paxHeaders[paxKeyword] = s
+ return
+ }
+
+ tw.usedBinary = true
+ f.formatNumeric(b, x)
+ }
+ var formatTime = func(b []byte, t time.Time, paxKeyword string) {
+ var unixTime int64
+ if !t.Before(minTime) && !t.After(maxTime) {
+ unixTime = t.Unix()
+ }
+ formatNumeric(b, unixTime, paxNone)
+
+ // Write a PAX header if the time has sub-second precision or falls
+ // outside the octal mtime range and thus didn't fit precisely.
+ if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || t.Before(minTime) || t.After(maxTime)) {
+ paxHeaders[paxKeyword] = formatPAXTime(t)
+ }
+ }
+
+ // Keep a reference to the filename so we can overwrite it later if we detect that ustar long names can be used instead of pax.
+ pathHeaderBytes := s.next(fileNameSize)
+
+ formatString(pathHeaderBytes, hdr.Name, paxPath)
+
+ f.formatOctal(s.next(8), hdr.Mode) // 100:108
+ formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
+ formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
+ formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
+ formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
+ s.next(8) // chksum (148:156)
+ s.next(1)[0] = hdr.Typeflag // 156:157
+
+ formatString(s.next(100), hdr.Linkname, paxLinkpath)
+
+ copy(s.next(8), []byte("ustar\x0000")) // 257:265
+ formatString(s.next(32), hdr.Uname, paxUname) // 265:297
+ formatString(s.next(32), hdr.Gname, paxGname) // 297:329
+ formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
+ formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
+
+ // Keep a reference to the prefix so we can overwrite it later if we detect that ustar long names can be used instead of pax.
+ prefixHeaderBytes := s.next(155)
+ formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
+
+ // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
+ if tw.usedBinary {
+ copy(header[257:265], []byte("ustar \x00"))
+ }
+
+ _, paxPathUsed := paxHeaders[paxPath]
+ // try to use a ustar header when only the name is too long
+ if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
+ prefix, suffix, ok := splitUSTARPath(hdr.Name)
+ if ok {
+ // Since we can encode in USTAR format, disable PAX header.
+ delete(paxHeaders, paxPath)
+
+ // Update the path fields
+ formatString(pathHeaderBytes, suffix, paxNone)
+ formatString(prefixHeaderBytes, prefix, paxNone)
+ }
+ }
+
+ // The chksum field is terminated by a NUL and a space.
+ // This is different from the other octal fields.
+ chksum, _ := checksum(header)
+ f.formatOctal(header[148:155], chksum) // Never fails
+ header[155] = ' '
+
+ // Check if there were any formatting errors.
+ if f.err != nil {
+ tw.err = f.err
+ return tw.err
+ }
+
+ if allowPax {
+ if !hdr.AccessTime.IsZero() {
+ paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
+ }
+ if !hdr.ChangeTime.IsZero() {
+ paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
+ }
+ if !hdr.CreationTime.IsZero() {
+ paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
+ }
+ for k, v := range hdr.Xattrs {
+ paxHeaders[paxXattr+k] = v
+ }
+ for k, v := range hdr.Winheaders {
+ paxHeaders[paxWindows+k] = v
+ }
+ }
+
+ if len(paxHeaders) > 0 {
+ if !allowPax {
+ return errInvalidHeader
+ }
+ if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+ return err
+ }
+ }
+ tw.nb = hdr.Size
+ tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
+
+ _, tw.err = tw.w.Write(header)
+ return tw.err
+}
+
+func formatPAXTime(t time.Time) string {
+ sec := t.Unix()
+ usec := t.Nanosecond()
+ s := strconv.FormatInt(sec, 10)
+ if usec != 0 {
+ s = fmt.Sprintf("%s.%09d", s, usec)
+ }
+ return s
+}
+
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
+ length := len(name)
+ if length <= fileNameSize || !isASCII(name) {
+ return "", "", false
+ } else if length > fileNamePrefixSize+1 {
+ length = fileNamePrefixSize + 1
+ } else if name[length-1] == '/' {
+ length--
+ }
+
+ i := strings.LastIndex(name[:length], "/")
+ nlen := len(name) - i - 1 // nlen is length of suffix
+ plen := i // plen is length of prefix
+ if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
+ return "", "", false
+ }
+ return name[:i], name[i+1:], true
+}
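+
+// For example (an illustrative sketch), the 121-byte ASCII name
+//
+//	strings.Repeat("a", 80) + "/" + strings.Repeat("b", 40)
+//
+// splits at the '/': the 80 a's become the prefix, the 40 b's the suffix,
+// and ok is true since both halves fit their USTAR fields.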
+
+// writePAXHeader writes an extended pax header to the
+// archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+ // Prepare extended header
+ ext := new(Header)
+ ext.Typeflag = TypeXHeader
+ // Setting ModTime is required for reader parsing to
+ // succeed, and seems harmless enough.
+ ext.ModTime = hdr.ModTime
+ // The spec asks that we namespace our pseudo files
+ // with the current pid. However, this results in differing outputs
+ // for identical inputs. As such, the constant 0 is now used instead.
+ // golang.org/issue/12358
+ dir, file := path.Split(hdr.Name)
+ fullName := path.Join(dir, "PaxHeaders.0", file)
+
+ ascii := toASCII(fullName)
+ if len(ascii) > 100 {
+ ascii = ascii[:100]
+ }
+ ext.Name = ascii
+ // Construct the body
+ var buf bytes.Buffer
+
+ // Keys are sorted before writing to body to allow deterministic output.
+ var keys []string
+ for k := range paxHeaders {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
+ }
+
+ ext.Size = int64(buf.Len())
+ if err := tw.writeHeader(ext, false); err != nil {
+ return err
+ }
+ if _, err := tw.Write(buf.Bytes()); err != nil {
+ return err
+ }
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// formatPAXRecord formats a single PAX record, prefixing it with the
+// appropriate length.
+func formatPAXRecord(k, v string) string {
+ const padding = 3 // Extra padding for ' ', '=', and '\n'
+ size := len(k) + len(v) + padding
+ size += len(strconv.Itoa(size))
+ record := fmt.Sprintf("%d %s=%s\n", size, k, v)
+
+ // Final adjustment if adding size field increased the record size.
+ if len(record) != size {
+ size = len(record)
+ record = fmt.Sprintf("%d %s=%s\n", size, k, v)
+ }
+ return record
+}
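+
+// For example (illustrative only):
+//
+//	formatPAXRecord("path", "a/b") // returns "12 path=a/b\n"
+//
+// The initial estimate is 10 bytes (4+3+3 for key, value, and padding);
+// prepending the length digits brings the record to exactly 12 bytes,
+// matching the length it declares.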
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+ if tw.closed {
+ err = ErrWriteAfterClose
+ return
+ }
+ overwrite := false
+ if int64(len(b)) > tw.nb {
+ b = b[0:tw.nb]
+ overwrite = true
+ }
+ n, err = tw.w.Write(b)
+ tw.nb -= int64(n)
+ if err == nil && overwrite {
+ err = ErrWriteTooLong
+ return
+ }
+ tw.err = err
+ return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+ if tw.err != nil || tw.closed {
+ return tw.err
+ }
+ tw.Flush()
+ tw.closed = true
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // trailer: two zero blocks
+ for i := 0; i < 2; i++ {
+ _, tw.err = tw.w.Write(zeroBlock)
+ if tw.err != nil {
+ break
+ }
+ }
+ return tw.err
+}
diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go
new file mode 100644
index 000000000..2be34af43
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backup.go
@@ -0,0 +1,280 @@
+// +build windows
+
+package winio
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "syscall"
+ "unicode/utf16"
+)
+
+//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+
+const (
+ BackupData = uint32(iota + 1)
+ BackupEaData
+ BackupSecurity
+ BackupAlternateData
+ BackupLink
+ BackupPropertyData
+ BackupObjectId
+ BackupReparseData
+ BackupSparseBlock
+ BackupTxfsData
+)
+
+const (
+ StreamSparseAttributes = uint32(8)
+)
+
+const (
+ WRITE_DAC = 0x40000
+ WRITE_OWNER = 0x80000
+ ACCESS_SYSTEM_SECURITY = 0x1000000
+)
+
+// BackupHeader represents a backup stream of a file.
+type BackupHeader struct {
+ Id uint32 // The backup stream ID
+ Attributes uint32 // Stream attributes
+ Size int64 // The size of the stream in bytes
+ Name string // The name of the stream (for BackupAlternateData only).
+ Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
+}
+
+type win32StreamId struct {
+ StreamId uint32
+ Attributes uint32
+ Size uint64
+ NameSize uint32
+}
+
+// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
+// of BackupHeader values.
+type BackupStreamReader struct {
+ r io.Reader
+ bytesLeft int64
+}
+
+// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
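+//
+// A minimal consumption loop (sketch; r is assumed to carry output from
+// BackupRead):
+//
+//	br := winio.NewBackupStreamReader(r)
+//	for {
+//		hdr, err := br.Next()
+//		if err == io.EOF {
+//			break // no more streams
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// hdr.Id identifies the stream (BackupData, BackupSecurity, ...);
+//		// the stream body can be read from br until the next call to Next.
+//	}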
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
+ return &BackupStreamReader{r, 0}
+}
+
+// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
+// it was not completely read.
+func (r *BackupStreamReader) Next() (*BackupHeader, error) {
+ if r.bytesLeft > 0 {
+ if s, ok := r.r.(io.Seeker); ok {
+			// Check that a no-op seek succeeds (i.e. that the stream
+			// really supports seeking) before trying the actual seek.
+ if _, err := s.Seek(0, io.SeekCurrent); err == nil {
+ if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
+ return nil, err
+ }
+ r.bytesLeft = 0
+ }
+ }
+ if _, err := io.Copy(ioutil.Discard, r); err != nil {
+ return nil, err
+ }
+ }
+ var wsi win32StreamId
+ if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
+ return nil, err
+ }
+ hdr := &BackupHeader{
+ Id: wsi.StreamId,
+ Attributes: wsi.Attributes,
+ Size: int64(wsi.Size),
+ }
+ if wsi.NameSize != 0 {
+ name := make([]uint16, int(wsi.NameSize/2))
+ if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
+ return nil, err
+ }
+ hdr.Name = syscall.UTF16ToString(name)
+ }
+ if wsi.StreamId == BackupSparseBlock {
+ if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
+ return nil, err
+ }
+ hdr.Size -= 8
+ }
+ r.bytesLeft = hdr.Size
+ return hdr, nil
+}
+
+// Read reads from the current backup stream.
+func (r *BackupStreamReader) Read(b []byte) (int, error) {
+ if r.bytesLeft == 0 {
+ return 0, io.EOF
+ }
+ if int64(len(b)) > r.bytesLeft {
+ b = b[:r.bytesLeft]
+ }
+ n, err := r.r.Read(b)
+ r.bytesLeft -= int64(n)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else if r.bytesLeft == 0 && err == nil {
+ err = io.EOF
+ }
+ return n, err
+}
+
+// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
+type BackupStreamWriter struct {
+ w io.Writer
+ bytesLeft int64
+}
+
+// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
+func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
+ return &BackupStreamWriter{w, 0}
+}
+
+// WriteHeader writes the next backup stream header and prepares for calls to Write().
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
+ if w.bytesLeft != 0 {
+ return fmt.Errorf("missing %d bytes", w.bytesLeft)
+ }
+ name := utf16.Encode([]rune(hdr.Name))
+ wsi := win32StreamId{
+ StreamId: hdr.Id,
+ Attributes: hdr.Attributes,
+ Size: uint64(hdr.Size),
+ NameSize: uint32(len(name) * 2),
+ }
+ if hdr.Id == BackupSparseBlock {
+ // Include space for the int64 block offset
+ wsi.Size += 8
+ }
+ if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
+ return err
+ }
+ if len(name) != 0 {
+ if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
+ return err
+ }
+ }
+ if hdr.Id == BackupSparseBlock {
+ if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
+ return err
+ }
+ }
+ w.bytesLeft = hdr.Size
+ return nil
+}
+
+// Write writes to the current backup stream.
+func (w *BackupStreamWriter) Write(b []byte) (int, error) {
+ if w.bytesLeft < int64(len(b)) {
+ return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
+ }
+ n, err := w.w.Write(b)
+ w.bytesLeft -= int64(n)
+ return n, err
+}
+
+// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
+type BackupFileReader struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
+// Read will attempt to read the security descriptor of the file.
+func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
+ r := &BackupFileReader{f, includeSecurity, 0}
+ return r
+}
+
+// Read reads a backup stream from the file by calling the Win32 API BackupRead().
+func (r *BackupFileReader) Read(b []byte) (int, error) {
+ var bytesRead uint32
+ err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+ if err != nil {
+		return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
+ }
+ runtime.KeepAlive(r.f)
+ if bytesRead == 0 {
+ return 0, io.EOF
+ }
+ return int(bytesRead), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileReader. It does not close
+// the underlying file.
+func (r *BackupFileReader) Close() error {
+ if r.ctx != 0 {
+ backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+ runtime.KeepAlive(r.f)
+ r.ctx = 0
+ }
+ return nil
+}
+
+// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
+type BackupFileWriter struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
+// Write() will attempt to restore the security descriptor from the stream.
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+ w := &BackupFileWriter{f, includeSecurity, 0}
+ return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+ var bytesWritten uint32
+ err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+ if err != nil {
+		return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
+ }
+ runtime.KeepAlive(w.f)
+ if int(bytesWritten) != len(b) {
+ return int(bytesWritten), errors.New("not all bytes could be written")
+ }
+ return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error {
+ if w.ctx != 0 {
+ backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+ runtime.KeepAlive(w.f)
+ w.ctx = 0
+ }
+ return nil
+}
+
+// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
+// or restore privileges have been acquired.
+//
+// If the file opened was a directory, it cannot be used with Readdir().
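+//
+// A typical call acquires the backup privilege first (sketch; the path and
+// flags are illustrative only):
+//
+//	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+//		f, err := winio.OpenForBackup(`C:\some\file`, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
+//		if err != nil {
+//			return err
+//		}
+//		defer f.Close()
+//		// read from f, e.g. via winio.NewBackupFileReader
+//		return nil
+//	})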
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
+ winPath, err := syscall.UTF16FromString(path)
+ if err != nil {
+ return nil, err
+ }
+ h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
+ if err != nil {
+ err = &os.PathError{Op: "open", Path: path, Err: err}
+ return nil, err
+ }
+ return os.NewFile(uintptr(h), path), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go
new file mode 100644
index 000000000..d39eccf02
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go
@@ -0,0 +1,4 @@
+// +build !windows
+// This file only exists to allow go get on non-Windows platforms.
+
+package backuptar
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
new file mode 100644
index 000000000..53da908f1
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
@@ -0,0 +1,439 @@
+// +build windows
+
+package backuptar
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
+)
+
+const (
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+ c_ISDIR = 040000 // Directory
+ c_ISFIFO = 010000 // FIFO
+ c_ISREG = 0100000 // Regular file
+ c_ISLNK = 0120000 // Symbolic link
+ c_ISBLK = 060000 // Block special file
+ c_ISCHR = 020000 // Character special file
+ c_ISSOCK = 0140000 // Socket
+)
+
+const (
+ hdrFileAttributes = "fileattr"
+ hdrSecurityDescriptor = "sd"
+ hdrRawSecurityDescriptor = "rawsd"
+ hdrMountPoint = "mountpoint"
+ hdrEaPrefix = "xattr."
+)
+
+func writeZeroes(w io.Writer, count int64) error {
+ buf := make([]byte, 8192)
+ c := len(buf)
+ for i := int64(0); i < count; i += int64(c) {
+ if int64(c) > count-i {
+ c = int(count - i)
+ }
+ _, err := w.Write(buf[:c])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
+ curOffset := int64(0)
+ for {
+ bhdr, err := br.Next()
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ if err != nil {
+ return err
+ }
+ if bhdr.Id != winio.BackupSparseBlock {
+ return fmt.Errorf("unexpected stream %d", bhdr.Id)
+ }
+
+ // archive/tar does not support writing sparse files
+ // so just write zeroes to catch up to the current offset.
+		err = writeZeroes(t, bhdr.Offset-curOffset)
+		if err != nil {
+			return err
+		}
+		if bhdr.Size == 0 {
+			break
+		}
+ n, err := io.Copy(t, br)
+ if err != nil {
+ return err
+ }
+ curOffset = bhdr.Offset + n
+ }
+ return nil
+}
+
+// BasicInfoHeader creates a tar header from basic file information.
+func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
+ hdr := &tar.Header{
+ Name: filepath.ToSlash(name),
+ Size: size,
+ Typeflag: tar.TypeReg,
+ ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
+ ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
+ AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
+ CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
+ Winheaders: make(map[string]string),
+ }
+ hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
+
+ if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+ hdr.Mode |= c_ISDIR
+ hdr.Size = 0
+ hdr.Typeflag = tar.TypeDir
+ }
+ return hdr
+}
+
+// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
+//
+// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
+//
+// The additional Win32 metadata is:
+//
+// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
+//
+// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
+//
+// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
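+//
+// A producer typically pairs this with a BackupFileReader (sketch; f, t,
+// name, size and fileInfo are assumed to come from the caller):
+//
+//	r := winio.NewBackupFileReader(f, true)
+//	defer r.Close()
+//	err := backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)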
+func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
+ name = filepath.ToSlash(name)
+ hdr := BasicInfoHeader(name, size, fileInfo)
+
+	// If r is seekable, this function makes two passes: pass 1 collects the
+	// tar header data, and pass 2 copies the data stream. If r is not
+	// seekable, some header data (in particular EAs) will be silently lost.
+ var (
+ restartPos int64
+ err error
+ )
+ sr, readTwice := r.(io.Seeker)
+ if readTwice {
+ if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
+ readTwice = false
+ }
+ }
+
+ br := winio.NewBackupStreamReader(r)
+ var dataHdr *winio.BackupHeader
+ for dataHdr == nil {
+ bhdr, err := br.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ switch bhdr.Id {
+ case winio.BackupData:
+ hdr.Mode |= c_ISREG
+ if !readTwice {
+ dataHdr = bhdr
+ }
+ case winio.BackupSecurity:
+ sd, err := ioutil.ReadAll(br)
+ if err != nil {
+ return err
+ }
+ hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
+
+ case winio.BackupReparseData:
+ hdr.Mode |= c_ISLNK
+ hdr.Typeflag = tar.TypeSymlink
+			reparseBuffer, err := ioutil.ReadAll(br)
+			if err != nil {
+				return err
+			}
+			rp, err := winio.DecodeReparsePoint(reparseBuffer)
+ if err != nil {
+ return err
+ }
+ if rp.IsMountPoint {
+ hdr.Winheaders[hdrMountPoint] = "1"
+ }
+ hdr.Linkname = rp.Target
+
+ case winio.BackupEaData:
+ eab, err := ioutil.ReadAll(br)
+ if err != nil {
+ return err
+ }
+ eas, err := winio.DecodeExtendedAttributes(eab)
+ if err != nil {
+ return err
+ }
+ for _, ea := range eas {
+ // Use base64 encoding for the binary value. Note that there
+ // is no way to encode the EA's flags, since their use doesn't
+ // make any sense for persisted EAs.
+ hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
+ }
+
+ case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+ // ignore these streams
+ default:
+ return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
+ }
+ }
+
+ err = t.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+
+ if readTwice {
+ // Get back to the data stream.
+ if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
+ return err
+ }
+ for dataHdr == nil {
+ bhdr, err := br.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if bhdr.Id == winio.BackupData {
+ dataHdr = bhdr
+ }
+ }
+ }
+
+ if dataHdr != nil {
+ // A data stream was found. Copy the data.
+ if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
+ if size != dataHdr.Size {
+ return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
+ }
+ _, err = io.Copy(t, br)
+ if err != nil {
+ return err
+ }
+ } else {
+ err = copySparse(t, br)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // Look for streams after the data stream. The only ones we handle are alternate data streams.
+ // Other streams may have metadata that could be serialized, but the tar header has already
+ // been written. In practice, this means that we don't get EA or TXF metadata.
+ for {
+ bhdr, err := br.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ switch bhdr.Id {
+ case winio.BackupAlternateData:
+ altName := bhdr.Name
+ if strings.HasSuffix(altName, ":$DATA") {
+ altName = altName[:len(altName)-len(":$DATA")]
+ }
+ if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
+ hdr = &tar.Header{
+ Name: name + altName,
+ Mode: hdr.Mode,
+ Typeflag: tar.TypeReg,
+ Size: bhdr.Size,
+ ModTime: hdr.ModTime,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ err = t.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(t, br)
+ if err != nil {
+ return err
+ }
+
+ } else {
+ // Unsupported for now, since the size of the alternate stream is not present
+ // in the backup stream until after the data has been read.
+ return errors.New("tar of sparse alternate data streams is unsupported")
+ }
+ case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+ // ignore these streams
+ default:
+ return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
+ }
+ }
+ return nil
+}
+
+// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
+// WriteTarFileFromBackupStream.
+func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
+ name = hdr.Name
+ if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+ size = hdr.Size
+ }
+ fileInfo = &winio.FileBasicInfo{
+ LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
+ LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
+ ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
+ CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
+ }
+ if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
+ attr, err := strconv.ParseUint(attrStr, 10, 32)
+ if err != nil {
+ return "", 0, nil, err
+ }
+ fileInfo.FileAttributes = uintptr(attr)
+ } else {
+ if hdr.Typeflag == tar.TypeDir {
+ fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+ }
+ }
+ return
+}
+
+// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
+// tar file entries in order to collect all the alternate data streams for the file, it returns the next
+// tar file entry that was not processed, or io.EOF if there are no more.
+func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
+ bw := winio.NewBackupStreamWriter(w)
+ var sd []byte
+ var err error
+ // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
+ // by this library will have raw binary for the security descriptor.
+ if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
+ sd, err = winio.SddlToSecurityDescriptor(sddl)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
+ sd, err = base64.StdEncoding.DecodeString(sdraw)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(sd) != 0 {
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupSecurity,
+ Size: int64(len(sd)),
+ }
+ err := bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = bw.Write(sd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ var eas []winio.ExtendedAttribute
+ for k, v := range hdr.Winheaders {
+ if !strings.HasPrefix(k, hdrEaPrefix) {
+ continue
+ }
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return nil, err
+ }
+ eas = append(eas, winio.ExtendedAttribute{
+ Name: k[len(hdrEaPrefix):],
+ Value: data,
+ })
+ }
+ if len(eas) != 0 {
+ eadata, err := winio.EncodeExtendedAttributes(eas)
+ if err != nil {
+ return nil, err
+ }
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupEaData,
+ Size: int64(len(eadata)),
+ }
+ err = bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = bw.Write(eadata)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if hdr.Typeflag == tar.TypeSymlink {
+ _, isMountPoint := hdr.Winheaders[hdrMountPoint]
+ rp := winio.ReparsePoint{
+ Target: filepath.FromSlash(hdr.Linkname),
+ IsMountPoint: isMountPoint,
+ }
+ reparse := winio.EncodeReparsePoint(&rp)
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupReparseData,
+ Size: int64(len(reparse)),
+ }
+ err := bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = bw.Write(reparse)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupData,
+ Size: hdr.Size,
+ }
+ err := bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = io.Copy(bw, t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Copy all the alternate data streams and return the next non-ADS header.
+ for {
+ ahdr, err := t.Next()
+ if err != nil {
+ return nil, err
+ }
+ if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
+ return ahdr, nil
+ }
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupAlternateData,
+ Size: ahdr.Size,
+ Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
+ }
+ err = bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = io.Copy(bw, t)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
new file mode 100644
index 000000000..b37e930d6
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -0,0 +1,137 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+)
+
+type fileFullEaInformation struct {
+ NextEntryOffset uint32
+ Flags uint8
+ NameLength uint8
+ ValueLength uint16
+}
+
+var (
+ fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
+
+ errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
+ errEaNameTooLarge = errors.New("extended attribute name too large")
+ errEaValueTooLarge = errors.New("extended attribute value too large")
+)
+
+// ExtendedAttribute represents a single Windows EA.
+type ExtendedAttribute struct {
+ Name string
+ Value []byte
+ Flags uint8
+}
+
+func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
+ var info fileFullEaInformation
+ err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
+ if err != nil {
+ err = errInvalidEaBuffer
+ return
+ }
+
+ nameOffset := fileFullEaInformationSize
+ nameLen := int(info.NameLength)
+ valueOffset := nameOffset + int(info.NameLength) + 1
+ valueLen := int(info.ValueLength)
+ nextOffset := int(info.NextEntryOffset)
+ if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
+ err = errInvalidEaBuffer
+ return
+ }
+
+ ea.Name = string(b[nameOffset : nameOffset+nameLen])
+ ea.Value = b[valueOffset : valueOffset+valueLen]
+ ea.Flags = info.Flags
+	if nextOffset != 0 {
+		nb = b[nextOffset:]
+	}
+ return
+}
+
+// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
+// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
+ for len(b) != 0 {
+ ea, nb, err := parseEa(b)
+ if err != nil {
+ return nil, err
+ }
+
+ eas = append(eas, ea)
+ b = nb
+ }
+ return
+}
+
+func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
+ if int(uint8(len(ea.Name))) != len(ea.Name) {
+ return errEaNameTooLarge
+ }
+ if int(uint16(len(ea.Value))) != len(ea.Value) {
+ return errEaValueTooLarge
+ }
+ entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
+ withPadding := (entrySize + 3) &^ 3
+ nextOffset := uint32(0)
+ if !last {
+ nextOffset = withPadding
+ }
+ info := fileFullEaInformation{
+ NextEntryOffset: nextOffset,
+ Flags: ea.Flags,
+ NameLength: uint8(len(ea.Name)),
+ ValueLength: uint16(len(ea.Value)),
+ }
+
+ err := binary.Write(buf, binary.LittleEndian, &info)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write([]byte(ea.Name))
+ if err != nil {
+ return err
+ }
+
+ err = buf.WriteByte(0)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write(ea.Value)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
+// buffer for use with BackupWrite, ZwSetEaFile, etc.
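+//
+// For example (sketch; the attribute name and value are illustrative):
+//
+//	buf, err := winio.EncodeExtendedAttributes([]winio.ExtendedAttribute{
+//		{Name: "user.comment", Value: []byte("hello")},
+//	})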
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
+ var buf bytes.Buffer
+ for i := range eas {
+ last := false
+ if i == len(eas)-1 {
+ last = true
+ }
+
+ err := writeEa(&buf, &eas[i], last)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go
new file mode 100644
index 000000000..57ac3696a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/file.go
@@ -0,0 +1,310 @@
+// +build windows
+
+package winio
+
+import (
+ "errors"
+ "io"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+)
+
+//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod
+
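+// atomicBool is a boolean that can be read, written and swapped atomically
+// from multiple goroutines.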
+type atomicBool int32
+
+func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
+func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
+func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
+func (b *atomicBool) swap(new bool) bool {
+ var newInt int32
+ if new {
+ newInt = 1
+ }
+ return atomic.SwapInt32((*int32)(b), newInt) == 1
+}
+
+const (
+ cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
+ cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
+)
+
+var (
+ ErrFileClosed = errors.New("file has already been closed")
+ ErrTimeout = &timeoutError{}
+)
+
+type timeoutError struct{}
+
+func (e *timeoutError) Error() string { return "i/o timeout" }
+func (e *timeoutError) Timeout() bool { return true }
+func (e *timeoutError) Temporary() bool { return true }
+
+type timeoutChan chan struct{}
+
+var ioInitOnce sync.Once
+var ioCompletionPort syscall.Handle
+
+// ioResult contains the result of an asynchronous IO operation
+type ioResult struct {
+ bytes uint32
+ err error
+}
+
+// ioOperation represents an outstanding asynchronous Win32 IO
+type ioOperation struct {
+ o syscall.Overlapped
+ ch chan ioResult
+}
+
+func initIo() {
+ h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
+ if err != nil {
+ panic(err)
+ }
+ ioCompletionPort = h
+ go ioCompletionProcessor(h)
+}
+
+// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
+// It takes ownership of this handle and will close it if it is garbage collected.
+type win32File struct {
+ handle syscall.Handle
+ wg sync.WaitGroup
+ wgLock sync.RWMutex
+ closing atomicBool
+ readDeadline deadlineHandler
+ writeDeadline deadlineHandler
+}
+
+type deadlineHandler struct {
+ setLock sync.Mutex
+ channel timeoutChan
+ channelLock sync.RWMutex
+ timer *time.Timer
+ timedout atomicBool
+}
+
+// makeWin32File makes a new win32File from an existing file handle
+func makeWin32File(h syscall.Handle) (*win32File, error) {
+ f := &win32File{handle: h}
+ ioInitOnce.Do(initIo)
+ _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
+ if err != nil {
+ return nil, err
+ }
+ err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
+ if err != nil {
+ return nil, err
+ }
+ f.readDeadline.channel = make(timeoutChan)
+ f.writeDeadline.channel = make(timeoutChan)
+ return f, nil
+}
+
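+// MakeOpenFile wraps an existing Win32 handle in an io.ReadWriteCloser that
+// performs overlapped (non-blocking) IO. It takes ownership of the handle.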
+func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+ return makeWin32File(h)
+}
+
+// closeHandle closes the resources associated with a Win32 handle
+func (f *win32File) closeHandle() {
+ f.wgLock.Lock()
+ // Atomically set that we are closing, releasing the resources only once.
+ if !f.closing.swap(true) {
+ f.wgLock.Unlock()
+ // cancel all IO and wait for it to complete
+ cancelIoEx(f.handle, nil)
+ f.wg.Wait()
+ // at this point, no new IO can start
+ syscall.Close(f.handle)
+ f.handle = 0
+ } else {
+ f.wgLock.Unlock()
+ }
+}
+
+// Close closes a win32File.
+func (f *win32File) Close() error {
+ f.closeHandle()
+ return nil
+}
+
+// prepareIo prepares for a new IO operation.
+// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
+func (f *win32File) prepareIo() (*ioOperation, error) {
+ f.wgLock.RLock()
+ if f.closing.isSet() {
+ f.wgLock.RUnlock()
+ return nil, ErrFileClosed
+ }
+ f.wg.Add(1)
+ f.wgLock.RUnlock()
+ c := &ioOperation{}
+ c.ch = make(chan ioResult)
+ return c, nil
+}
+
+// ioCompletionProcessor processes completed async IOs forever
+func ioCompletionProcessor(h syscall.Handle) {
+ // Set the timer resolution to 1. This fixes a performance regression in golang 1.6.
+ timeBeginPeriod(1)
+ for {
+ var bytes uint32
+ var key uintptr
+ var op *ioOperation
+ err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
+ if op == nil {
+ panic(err)
+ }
+ op.ch <- ioResult{bytes, err}
+ }
+}
+
+// asyncIo processes the return value from ReadFile or WriteFile, blocking until
+// the operation has actually completed.
+func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
+ if err != syscall.ERROR_IO_PENDING {
+ return int(bytes), err
+ }
+
+ if f.closing.isSet() {
+ cancelIoEx(f.handle, &c.o)
+ }
+
+ var timeout timeoutChan
+ if d != nil {
+ d.channelLock.Lock()
+ timeout = d.channel
+ d.channelLock.Unlock()
+ }
+
+ var r ioResult
+ select {
+ case r = <-c.ch:
+ err = r.err
+ if err == syscall.ERROR_OPERATION_ABORTED {
+ if f.closing.isSet() {
+ err = ErrFileClosed
+ }
+ }
+ case <-timeout:
+ cancelIoEx(f.handle, &c.o)
+ r = <-c.ch
+ err = r.err
+ if err == syscall.ERROR_OPERATION_ABORTED {
+ err = ErrTimeout
+ }
+ }
+
+	// runtime.KeepAlive is needed: c is passed via native code to
+	// ioCompletionProcessor, so it must remain alive until the channel
+	// read is complete.
+ runtime.KeepAlive(c)
+ return int(r.bytes), err
+}
+
+// Read reads from a file handle.
+func (f *win32File) Read(b []byte) (int, error) {
+ c, err := f.prepareIo()
+ if err != nil {
+ return 0, err
+ }
+ defer f.wg.Done()
+
+ if f.readDeadline.timedout.isSet() {
+ return 0, ErrTimeout
+ }
+
+ var bytes uint32
+ err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
+ runtime.KeepAlive(b)
+
+ // Handle EOF conditions.
+ if err == nil && n == 0 && len(b) != 0 {
+ return 0, io.EOF
+ } else if err == syscall.ERROR_BROKEN_PIPE {
+ return 0, io.EOF
+ } else {
+ return n, err
+ }
+}
+
+// Write writes to a file handle.
+func (f *win32File) Write(b []byte) (int, error) {
+ c, err := f.prepareIo()
+ if err != nil {
+ return 0, err
+ }
+ defer f.wg.Done()
+
+ if f.writeDeadline.timedout.isSet() {
+ return 0, ErrTimeout
+ }
+
+ var bytes uint32
+ err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
+ runtime.KeepAlive(b)
+ return n, err
+}
+
+func (f *win32File) SetReadDeadline(deadline time.Time) error {
+ return f.readDeadline.set(deadline)
+}
+
+func (f *win32File) SetWriteDeadline(deadline time.Time) error {
+ return f.writeDeadline.set(deadline)
+}
+
+func (f *win32File) Flush() error {
+ return syscall.FlushFileBuffers(f.handle)
+}
+
+func (d *deadlineHandler) set(deadline time.Time) error {
+ d.setLock.Lock()
+ defer d.setLock.Unlock()
+
+ if d.timer != nil {
+ if !d.timer.Stop() {
+ <-d.channel
+ }
+ d.timer = nil
+ }
+ d.timedout.setFalse()
+
+ select {
+ case <-d.channel:
+ d.channelLock.Lock()
+ d.channel = make(chan struct{})
+ d.channelLock.Unlock()
+ default:
+ }
+
+ if deadline.IsZero() {
+ return nil
+ }
+
+ timeoutIO := func() {
+ d.timedout.setTrue()
+ close(d.channel)
+ }
+
+ now := time.Now()
+ duration := deadline.Sub(now)
+ if deadline.After(now) {
+ // Deadline is in the future, set a timer to wait
+ d.timer = time.AfterFunc(duration, timeoutIO)
+ } else {
+ // Deadline is in the past. Cancel all pending IO now.
+ timeoutIO()
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
new file mode 100644
index 000000000..b1d60abb8
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -0,0 +1,60 @@
+// +build windows
+
+package winio
+
+import (
+ "os"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
+//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
+
+const (
+ fileBasicInfo = 0
+ fileIDInfo = 0x12
+)
+
+// FileBasicInfo contains file access time and file attributes information.
+type FileBasicInfo struct {
+ CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
+ FileAttributes uintptr // includes padding
+}
+
+// GetFileBasicInfo retrieves times and attributes for a file.
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+ bi := &FileBasicInfo{}
+ if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return bi, nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+ if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return nil
+}
+
+// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
+// unique on a system.
+type FileIDInfo struct {
+ VolumeSerialNumber uint64
+ FileID [16]byte
+}
+
+// GetFileID retrieves the unique (volume, file ID) pair for a file.
+func GetFileID(f *os.File) (*FileIDInfo, error) {
+ fileID := &FileIDInfo{}
+ if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return fileID, nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go
new file mode 100644
index 000000000..44340b816
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pipe.go
@@ -0,0 +1,404 @@
+// +build windows
+
+package winio
+
+import (
+ "errors"
+ "io"
+ "net"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
+//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
+//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
+//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
+//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
+
+const (
+ cERROR_PIPE_BUSY = syscall.Errno(231)
+ cERROR_PIPE_CONNECTED = syscall.Errno(535)
+ cERROR_SEM_TIMEOUT = syscall.Errno(121)
+
+ cPIPE_ACCESS_DUPLEX = 0x3
+ cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
+ cSECURITY_SQOS_PRESENT = 0x100000
+ cSECURITY_ANONYMOUS = 0
+
+ cPIPE_REJECT_REMOTE_CLIENTS = 0x8
+
+ cPIPE_UNLIMITED_INSTANCES = 255
+
+ cNMPWAIT_USE_DEFAULT_WAIT = 0
+ cNMPWAIT_NOWAIT = 1
+
+ cPIPE_TYPE_MESSAGE = 4
+
+ cPIPE_READMODE_MESSAGE = 2
+)
+
+var (
+ // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
+ // This error should match net.errClosing since docker takes a dependency on its text.
+ ErrPipeListenerClosed = errors.New("use of closed network connection")
+
+ errPipeWriteClosed = errors.New("pipe has been closed for write")
+)
+
+type win32Pipe struct {
+ *win32File
+ path string
+}
+
+type win32MessageBytePipe struct {
+ win32Pipe
+ writeClosed bool
+ readEOF bool
+}
+
+type pipeAddress string
+
+func (f *win32Pipe) LocalAddr() net.Addr {
+ return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) RemoteAddr() net.Addr {
+ return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) SetDeadline(t time.Time) error {
+ f.SetReadDeadline(t)
+ f.SetWriteDeadline(t)
+ return nil
+}
+
+// CloseWrite closes the write side of a message pipe in byte mode.
+func (f *win32MessageBytePipe) CloseWrite() error {
+ if f.writeClosed {
+ return errPipeWriteClosed
+ }
+ err := f.win32File.Flush()
+ if err != nil {
+ return err
+ }
+ _, err = f.win32File.Write(nil)
+ if err != nil {
+ return err
+ }
+ f.writeClosed = true
+ return nil
+}
+
+// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
+// they are used to implement CloseWrite().
+func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
+ if f.writeClosed {
+ return 0, errPipeWriteClosed
+ }
+ if len(b) == 0 {
+ return 0, nil
+ }
+ return f.win32File.Write(b)
+}
+
+// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
+// mode pipe will return io.EOF, as will all subsequent reads.
+func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
+ if f.readEOF {
+ return 0, io.EOF
+ }
+ n, err := f.win32File.Read(b)
+ if err == io.EOF {
+ // If this was the result of a zero-byte read, then
+ // it is possible that the read was due to a zero-size
+ // message. Since we are simulating CloseWrite with a
+ // zero-byte message, ensure that all future Read() calls
+ // also return EOF.
+ f.readEOF = true
+ }
+ return n, err
+}
+
+func (s pipeAddress) Network() string {
+ return "pipe"
+}
+
+func (s pipeAddress) String() string {
+ return string(s)
+}
+
+// DialPipe connects to a named pipe by path, timing out if the connection
+// takes longer than the specified duration. If timeout is nil, then the timeout
+// is the default timeout established by the pipe server.
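+//
+// For example (sketch; the pipe name is illustrative):
+//
+//	timeout := 2 * time.Second
+//	conn, err := winio.DialPipe(`\\.\pipe\mypipe`, &timeout)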
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
+ var absTimeout time.Time
+ if timeout != nil {
+ absTimeout = time.Now().Add(*timeout)
+ }
+ var err error
+ var h syscall.Handle
+ for {
+ h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+ if err != cERROR_PIPE_BUSY {
+ break
+ }
+ now := time.Now()
+ var ms uint32
+ if absTimeout.IsZero() {
+ ms = cNMPWAIT_USE_DEFAULT_WAIT
+ } else if now.After(absTimeout) {
+ ms = cNMPWAIT_NOWAIT
+ } else {
+ ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000)
+ }
+ err = waitNamedPipe(path, ms)
+ if err != nil {
+ if err == cERROR_SEM_TIMEOUT {
+ return nil, ErrTimeout
+ }
+ break
+ }
+ }
+ if err != nil {
+ return nil, &os.PathError{Op: "open", Path: path, Err: err}
+ }
+
+ var flags uint32
+ err = getNamedPipeInfo(h, &flags, nil, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var state uint32
+ err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ if state&cPIPE_READMODE_MESSAGE != 0 {
+ return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")}
+ }
+
+ f, err := makeWin32File(h)
+ if err != nil {
+ syscall.Close(h)
+ return nil, err
+ }
+
+ // If the pipe is in message mode, return a message byte pipe, which
+ // supports CloseWrite().
+ if flags&cPIPE_TYPE_MESSAGE != 0 {
+ return &win32MessageBytePipe{
+ win32Pipe: win32Pipe{win32File: f, path: path},
+ }, nil
+ }
+ return &win32Pipe{win32File: f, path: path}, nil
+}
+
+type acceptResponse struct {
+ f *win32File
+ err error
+}
+
+type win32PipeListener struct {
+ firstHandle syscall.Handle
+ path string
+ securityDescriptor []byte
+ config PipeConfig
+ acceptCh chan (chan acceptResponse)
+ closeCh chan int
+ doneCh chan int
+}
+
+func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
+ var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
+ if first {
+ flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
+ }
+
+ var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
+ if c.MessageMode {
+ mode |= cPIPE_TYPE_MESSAGE
+ }
+
+ sa := &syscall.SecurityAttributes{}
+ sa.Length = uint32(unsafe.Sizeof(*sa))
+ if securityDescriptor != nil {
+		sdLen := uint32(len(securityDescriptor))
+		sa.SecurityDescriptor = localAlloc(0, sdLen)
+ defer localFree(sa.SecurityDescriptor)
+ copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
+ }
+ h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
+ if err != nil {
+ return 0, &os.PathError{Op: "open", Path: path, Err: err}
+ }
+ return h, nil
+}
+
+func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
+ h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
+ if err != nil {
+ return nil, err
+ }
+ f, err := makeWin32File(h)
+ if err != nil {
+ syscall.Close(h)
+ return nil, err
+ }
+ return f, nil
+}
+
+func (l *win32PipeListener) listenerRoutine() {
+ closed := false
+ for !closed {
+ select {
+ case <-l.closeCh:
+ closed = true
+ case responseCh := <-l.acceptCh:
+ p, err := l.makeServerPipe()
+ if err == nil {
+ // Wait for the client to connect.
+ ch := make(chan error)
+ go func(p *win32File) {
+ ch <- connectPipe(p)
+ }(p)
+ select {
+ case err = <-ch:
+ if err != nil {
+ p.Close()
+ p = nil
+ }
+ case <-l.closeCh:
+ // Abort the connect request by closing the handle.
+ p.Close()
+ p = nil
+ err = <-ch
+ if err == nil || err == ErrFileClosed {
+ err = ErrPipeListenerClosed
+ }
+ closed = true
+ }
+ }
+ responseCh <- acceptResponse{p, err}
+ }
+ }
+ syscall.Close(l.firstHandle)
+ l.firstHandle = 0
+ // Notify Close() and Accept() callers that the handle has been closed.
+ close(l.doneCh)
+}
+
+// PipeConfig contains configuration for the pipe listener.
+type PipeConfig struct {
+ // SecurityDescriptor contains a Windows security descriptor in SDDL format.
+ SecurityDescriptor string
+
+ // MessageMode determines whether the pipe is in byte or message mode. In either
+ // case the pipe is read in byte mode by default. The only practical difference in
+ // this implementation is that CloseWrite() is only supported for message mode pipes;
+ // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+ // transferred to the reader (and returned as io.EOF in this implementation)
+ // when the pipe is in message mode.
+ MessageMode bool
+
+	// InputBufferSize specifies the size of the input buffer, in bytes.
+ InputBufferSize int32
+
+	// OutputBufferSize specifies the size of the output buffer, in bytes.
+ OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
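+//
+// A minimal server loop (sketch; the pipe name and handler are illustrative):
+//
+//	l, err := winio.ListenPipe(`\\.\pipe\mypipe`, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer l.Close()
+//	for {
+//		conn, err := l.Accept()
+//		if err != nil {
+//			return err // ErrPipeListenerClosed once the listener is closed
+//		}
+//		go handle(conn)
+//	}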
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
+ var (
+ sd []byte
+ err error
+ )
+ if c == nil {
+ c = &PipeConfig{}
+ }
+ if c.SecurityDescriptor != "" {
+ sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
+ if err != nil {
+ return nil, err
+ }
+ }
+ h, err := makeServerPipeHandle(path, sd, c, true)
+ if err != nil {
+ return nil, err
+ }
+ // Immediately open and then close a client handle so that the named pipe is
+ // created but not currently accepting connections.
+ h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+ if err != nil {
+ syscall.Close(h)
+ return nil, err
+ }
+ syscall.Close(h2)
+ l := &win32PipeListener{
+ firstHandle: h,
+ path: path,
+ securityDescriptor: sd,
+ config: *c,
+ acceptCh: make(chan (chan acceptResponse)),
+ closeCh: make(chan int),
+ doneCh: make(chan int),
+ }
+ go l.listenerRoutine()
+ return l, nil
+}
+
+func connectPipe(p *win32File) error {
+ c, err := p.prepareIo()
+ if err != nil {
+ return err
+ }
+ defer p.wg.Done()
+
+ err = connectNamedPipe(p.handle, &c.o)
+ _, err = p.asyncIo(c, nil, 0, err)
+ if err != nil && err != cERROR_PIPE_CONNECTED {
+ return err
+ }
+ return nil
+}
+
+func (l *win32PipeListener) Accept() (net.Conn, error) {
+ ch := make(chan acceptResponse)
+ select {
+ case l.acceptCh <- ch:
+ response := <-ch
+ err := response.err
+ if err != nil {
+ return nil, err
+ }
+ if l.config.MessageMode {
+ return &win32MessageBytePipe{
+ win32Pipe: win32Pipe{win32File: response.f, path: l.path},
+ }, nil
+ }
+ return &win32Pipe{win32File: response.f, path: l.path}, nil
+ case <-l.doneCh:
+ return nil, ErrPipeListenerClosed
+ }
+}
+
+func (l *win32PipeListener) Close() error {
+ select {
+ case l.closeCh <- 1:
+ <-l.doneCh
+ case <-l.doneCh:
+ }
+ return nil
+}
+
+func (l *win32PipeListener) Addr() net.Addr {
+ return pipeAddress(l.path)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go
new file mode 100644
index 000000000..9c83d36fe
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/privilege.go
@@ -0,0 +1,202 @@
+// +build windows
+
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "runtime"
+ "sync"
+ "syscall"
+ "unicode/utf16"
+
+ "golang.org/x/sys/windows"
+)
+
+//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
+//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
+//sys revertToSelf() (err error) = advapi32.RevertToSelf
+//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
+//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
+//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
+//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
+//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
+
+const (
+ SE_PRIVILEGE_ENABLED = 2
+
+ ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
+
+ SeBackupPrivilege = "SeBackupPrivilege"
+ SeRestorePrivilege = "SeRestorePrivilege"
+)
+
+const (
+ securityAnonymous = iota
+ securityIdentification
+ securityImpersonation
+ securityDelegation
+)
+
+var (
+ privNames = make(map[string]uint64)
+ privNameMutex sync.Mutex
+)
+
+// PrivilegeError represents an error enabling privileges.
+type PrivilegeError struct {
+ privileges []uint64
+}
+
+func (e *PrivilegeError) Error() string {
+ s := ""
+ if len(e.privileges) > 1 {
+ s = "Could not enable privileges "
+ } else {
+ s = "Could not enable privilege "
+ }
+ for i, p := range e.privileges {
+ if i != 0 {
+ s += ", "
+ }
+ s += `"`
+ s += getPrivilegeName(p)
+ s += `"`
+ }
+ return s
+}
+
+// RunWithPrivilege enables a single privilege for a function call.
+func RunWithPrivilege(name string, fn func() error) error {
+ return RunWithPrivileges([]string{name}, fn)
+}
+
+// RunWithPrivileges enables privileges for a function call.
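+//
+// For example (sketch):
+//
+//	err := winio.RunWithPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}, func() error {
+//		// privileged work here
+//		return nil
+//	})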
+func RunWithPrivileges(names []string, fn func() error) error {
+ privileges, err := mapPrivileges(names)
+ if err != nil {
+ return err
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ token, err := newThreadToken()
+ if err != nil {
+ return err
+ }
+ defer releaseThreadToken(token)
+ err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
+ if err != nil {
+ return err
+ }
+ return fn()
+}
+
+func mapPrivileges(names []string) ([]uint64, error) {
+ var privileges []uint64
+ privNameMutex.Lock()
+ defer privNameMutex.Unlock()
+ for _, name := range names {
+ p, ok := privNames[name]
+ if !ok {
+ err := lookupPrivilegeValue("", name, &p)
+ if err != nil {
+ return nil, err
+ }
+ privNames[name] = p
+ }
+ privileges = append(privileges, p)
+ }
+ return privileges, nil
+}
+
+// EnableProcessPrivileges enables privileges globally for the process.
+func EnableProcessPrivileges(names []string) error {
+ return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+ return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+ privileges, err := mapPrivileges(names)
+ if err != nil {
+ return err
+ }
+
+ p, _ := windows.GetCurrentProcess()
+ var token windows.Token
+ err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+ if err != nil {
+ return err
+ }
+
+ defer token.Close()
+ return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+ var b bytes.Buffer
+ binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+ for _, p := range privileges {
+ binary.Write(&b, binary.LittleEndian, p)
+ binary.Write(&b, binary.LittleEndian, action)
+ }
+ prevState := make([]byte, b.Len())
+ reqSize := uint32(0)
+ success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+ if !success {
+ return err
+ }
+ if err == ERROR_NOT_ALL_ASSIGNED {
+ return &PrivilegeError{privileges}
+ }
+ return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+ var nameBuffer [256]uint16
+ bufSize := uint32(len(nameBuffer))
+ err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %d>", luid)
+ }
+
+ var displayNameBuffer [256]uint16
+ displayBufSize := uint32(len(displayNameBuffer))
+ var langID uint32
+ err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+ }
+
+ return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+ err := impersonateSelf(securityImpersonation)
+ if err != nil {
+ return 0, err
+ }
+
+ var token windows.Token
+ err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+ if err != nil {
+ rerr := revertToSelf()
+ if rerr != nil {
+ panic(rerr)
+ }
+ return 0, err
+ }
+ return token, nil
+}
+
+func releaseThreadToken(h windows.Token) {
+ err := revertToSelf()
+ if err != nil {
+ panic(err)
+ }
+ h.Close()
+}
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 000000000..fc1ee4d3a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,128 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "strings"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ reparseTagMountPoint = 0xA0000003
+ reparseTagSymlink = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+ ReparseTag uint32
+ ReparseDataLength uint16
+ Reserved uint16
+ SubstituteNameOffset uint16
+ SubstituteNameLength uint16
+ PrintNameOffset uint16
+ PrintNameLength uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+ Target string
+ IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct {
+ Tag uint32
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+ return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
+ tag := binary.LittleEndian.Uint32(b[0:4])
+ return DecodeReparsePointData(tag, b[8:])
+}
+
+func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
+ isMountPoint := false
+ switch tag {
+ case reparseTagMountPoint:
+ isMountPoint = true
+ case reparseTagSymlink:
+ default:
+ return nil, &UnsupportedReparsePointError{tag}
+ }
+ nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
+ if !isMountPoint {
+ nameOffset += 4
+ }
+ nameLength := binary.LittleEndian.Uint16(b[6:8])
+ name := make([]uint16, nameLength/2)
+ err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
+ if err != nil {
+ return nil, err
+ }
+ return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
+}
+
+func isDriveLetter(c byte) bool {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
+// mount point.
+func EncodeReparsePoint(rp *ReparsePoint) []byte {
+ // Generate an NT path and determine if this is a relative path.
+ var ntTarget string
+ relative := false
+ if strings.HasPrefix(rp.Target, `\\?\`) {
+ ntTarget = `\??\` + rp.Target[4:]
+ } else if strings.HasPrefix(rp.Target, `\\`) {
+ ntTarget = `\??\UNC\` + rp.Target[2:]
+ } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
+ ntTarget = `\??\` + rp.Target
+ } else {
+ ntTarget = rp.Target
+ relative = true
+ }
+
+ // The paths must be NUL-terminated even though they are counted strings.
+ target16 := utf16.Encode([]rune(rp.Target + "\x00"))
+ ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
+
+ size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
+ size += len(ntTarget16)*2 + len(target16)*2
+
+ tag := uint32(reparseTagMountPoint)
+ if !rp.IsMountPoint {
+ tag = reparseTagSymlink
+ size += 4 // Add room for symlink flags
+ }
+
+ data := reparseDataBuffer{
+ ReparseTag: tag,
+ ReparseDataLength: uint16(size),
+ SubstituteNameOffset: 0,
+ SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
+ PrintNameOffset: uint16(len(ntTarget16) * 2),
+ PrintNameLength: uint16((len(target16) - 1) * 2),
+ }
+
+ var b bytes.Buffer
+ binary.Write(&b, binary.LittleEndian, &data)
+ if !rp.IsMountPoint {
+ flags := uint32(0)
+ if relative {
+ flags |= 1
+ }
+ binary.Write(&b, binary.LittleEndian, flags)
+ }
+
+ binary.Write(&b, binary.LittleEndian, ntTarget16)
+ binary.Write(&b, binary.LittleEndian, target16)
+ return b.Bytes()
+}
diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go
new file mode 100644
index 000000000..db1b370a1
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/sd.go
@@ -0,0 +1,98 @@
+// +build windows
+
+package winio
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
+//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
+//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
+//sys localFree(mem uintptr) = LocalFree
+//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
+
+const (
+ cERROR_NONE_MAPPED = syscall.Errno(1332)
+)
+
+type AccountLookupError struct {
+ Name string
+ Err error
+}
+
+func (e *AccountLookupError) Error() string {
+ if e.Name == "" {
+ return "lookup account: empty account name specified"
+ }
+ var s string
+ switch e.Err {
+ case cERROR_NONE_MAPPED:
+ s = "not found"
+ default:
+ s = e.Err.Error()
+ }
+ return "lookup account " + e.Name + ": " + s
+}
+
+type SddlConversionError struct {
+ Sddl string
+ Err error
+}
+
+func (e *SddlConversionError) Error() string {
+ return "convert " + e.Sddl + ": " + e.Err.Error()
+}
+
+// LookupSidByName looks up the SID of an account by name
+func LookupSidByName(name string) (sid string, err error) {
+ if name == "" {
+ return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
+ }
+
+ var sidSize, sidNameUse, refDomainSize uint32
+ err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
+ if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
+ return "", &AccountLookupError{name, err}
+ }
+ sidBuffer := make([]byte, sidSize)
+ refDomainBuffer := make([]uint16, refDomainSize)
+ err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ var strBuffer *uint16
+ err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+ localFree(uintptr(unsafe.Pointer(strBuffer)))
+ return sid, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+ var sdBuffer uintptr
+ err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+ if err != nil {
+ return nil, &SddlConversionError{sddl, err}
+ }
+ defer localFree(sdBuffer)
+ sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
+ copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
+ return sd, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+ var sddl *uint16
+ // The returned string length seems to include an arbitrary number of
+ // terminating NULs. Don't use it.
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
+ if err != nil {
+ return "", err
+ }
+ defer localFree(uintptr(unsafe.Pointer(sddl)))
+ return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
+}
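+
+// Editor's note: a usage sketch, not part of the vendored source, showing how
+// these helpers compose; "Administrators" and the ACE string are illustrative:
+//
+//	sid, err := LookupSidByName("Administrators") // e.g. "S-1-5-32-544"
+//	if err != nil {
+//		// *AccountLookupError, e.g. "not found"
+//	}
+//	sd, err := SddlToSecurityDescriptor("D:P(A;;GA;;;" + sid + ")")
+//	sddl, err := SecurityDescriptorToSddl(sd) // round-trips, possibly normalized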
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
new file mode 100644
index 000000000..20d64cf41
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -0,0 +1,3 @@
+package winio
+
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
new file mode 100644
index 000000000..4f7a52eeb
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -0,0 +1,528 @@
+// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+
+package winio
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
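+
+// Editor's note (illustrative, not generated output): boxing ERROR_IO_PENDING
+// once means the hot path below returns the shared error value instead of
+// allocating a new interface value on every overlapped call:
+//
+//	if err := connectNamedPipe(h, &o); err == errERROR_IO_PENDING {
+//		// overlapped connect in flight; wait for completion
+//	}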
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ modwinmm = windows.NewLazySystemDLL("winmm.dll")
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+
+ procCancelIoEx = modkernel32.NewProc("CancelIoEx")
+ procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
+ procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
+ procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+ proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod")
+ procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
+ procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
+ procCreateFileW = modkernel32.NewProc("CreateFileW")
+ procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
+ procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
+ procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
+ procLocalAlloc = modkernel32.NewProc("LocalAlloc")
+ procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
+ procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
+ procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
+ procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
+ procLocalFree = modkernel32.NewProc("LocalFree")
+ procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
+ procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
+ procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
+ procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
+ procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+ procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+ procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+ procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
+ procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+ procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
+ procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
+ procBackupRead = modkernel32.NewProc("BackupRead")
+ procBackupWrite = modkernel32.NewProc("BackupWrite")
+)
+
+func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
+ newport = syscall.Handle(r0)
+ if newport == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func timeBeginPeriod(period uint32) (n int32) {
+ r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+ n = int32(r0)
+ return
+}
+
+func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
+}
+
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+}
+
+func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func waitNamedPipe(name string, timeout uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _waitNamedPipe(_p0, timeout)
+}
+
+func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+ r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
+ r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
+ ptr = uintptr(r0)
+ return
+}
+
+func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(accountName)
+ if err != nil {
+ return
+ }
+ return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
+}
+
+func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func convertSidToStringSid(sid *byte, str **uint16) (err error) {
+ r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(str)
+ if err != nil {
+ return
+ }
+ return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
+}
+
+func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func localFree(mem uintptr) {
+ syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
+ return
+}
+
+func getSecurityDescriptorLength(sd uintptr) (len uint32) {
+ r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
+ len = uint32(r0)
+ return
+}
+
+func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
+ var _p0 uint32
+ if releaseAll {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
+ success = r0 != 0
+ if true {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func impersonateSelf(level uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func revertToSelf() (err error) {
+ r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
+ var _p0 uint32
+ if openAsSelf {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getCurrentThread() (h syscall.Handle) {
+ r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
+ h = syscall.Handle(r0)
+ return
+}
+
+func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeValue(_p0, _p1, luid)
+}
+
+func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
+ r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeName(_p0, luid, buffer, size)
+}
+
+func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
+}
+
+func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
+ }
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ } else {
+ _p1 = 0
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ } else {
+ _p2 = 0
+ }
+ r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
+ }
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ } else {
+ _p1 = 0
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ } else {
+ _p2 = 0
+ }
+ r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE
new file mode 100644
index 000000000..49d21669a
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
new file mode 100644
index 000000000..30991a12e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -0,0 +1,12 @@
+# hcsshim
+
+This package supports launching Windows Server containers from Go. It is
+primarily used in the [Docker Engine](https://github.com/docker/docker) project,
+but it can be freely used by other projects as well.
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
diff --git a/vendor/github.com/Microsoft/hcsshim/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/activatelayer.go
new file mode 100644
index 000000000..6d824d7a7
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/activatelayer.go
@@ -0,0 +1,28 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// ActivateLayer will find the layer with the given id and mount its filesystem.
+// For a read/write layer, the mounted filesystem will appear as a volume on the
+// host, while a read-only layer is generally expected to be a no-op.
+// An activated layer must later be deactivated via DeactivateLayer.
+func ActivateLayer(info DriverInfo, id string) error {
+ title := "hcsshim::ActivateLayer "
+ logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = activateLayer(&infop, id)
+ if err != nil {
+ err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour)
+ return nil
+}
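+
+// Editor's note: an illustrative pairing, not part of the vendored source;
+// DriverInfo construction is elided:
+//
+//	if err := ActivateLayer(info, id); err != nil {
+//		return err
+//	}
+//	defer DeactivateLayer(info, id) // counterpart, defined in deactivatelayer.go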
diff --git a/vendor/github.com/Microsoft/hcsshim/baselayer.go b/vendor/github.com/Microsoft/hcsshim/baselayer.go
new file mode 100644
index 000000000..9babd4e18
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/baselayer.go
@@ -0,0 +1,183 @@
+package hcsshim
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/Microsoft/go-winio"
+)
+
+type baseLayerWriter struct {
+ root string
+ f *os.File
+ bw *winio.BackupFileWriter
+ err error
+ hasUtilityVM bool
+ dirInfo []dirInfo
+}
+
+type dirInfo struct {
+ path string
+ fileInfo winio.FileBasicInfo
+}
+
+// reapplyDirectoryTimes reapplies directory modification, creation, etc. times
+// after processing of the directory tree has completed. The times are expected
+// to be ordered such that parent directories come before child directories.
+func reapplyDirectoryTimes(dis []dirInfo) error {
+ for i := range dis {
+ di := &dis[len(dis)-i-1] // reverse order: process child directories first
+ f, err := winio.OpenForBackup(di.path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
+ if err != nil {
+ return err
+ }
+
+ err = winio.SetFileBasicInfo(f, &di.fileInfo)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *baseLayerWriter) closeCurrentFile() error {
+ if w.f != nil {
+ err := w.bw.Close()
+ err2 := w.f.Close()
+ w.f = nil
+ w.bw = nil
+ if err != nil {
+ return err
+ }
+ if err2 != nil {
+ return err2
+ }
+ }
+ return nil
+}
+
+func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) {
+ defer func() {
+ if err != nil {
+ w.err = err
+ }
+ }()
+
+ err = w.closeCurrentFile()
+ if err != nil {
+ return err
+ }
+
+ if filepath.ToSlash(name) == `UtilityVM/Files` {
+ w.hasUtilityVM = true
+ }
+
+ path := filepath.Join(w.root, name)
+ path, err = makeLongAbsPath(path)
+ if err != nil {
+ return err
+ }
+
+ var f *os.File
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ }()
+
+ createmode := uint32(syscall.CREATE_NEW)
+ if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ err := os.Mkdir(path, 0)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ createmode = syscall.OPEN_EXISTING
+ if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
+ w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo})
+ }
+ }
+
+ mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
+ f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode)
+ if err != nil {
+ return makeError(err, "Failed to OpenForBackup", path)
+ }
+
+ err = winio.SetFileBasicInfo(f, fileInfo)
+ if err != nil {
+ return makeError(err, "Failed to SetFileBasicInfo", path)
+ }
+
+ w.f = f
+ w.bw = winio.NewBackupFileWriter(f, true)
+ f = nil
+ return nil
+}
+
+func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
+ defer func() {
+ if err != nil {
+ w.err = err
+ }
+ }()
+
+ err = w.closeCurrentFile()
+ if err != nil {
+ return err
+ }
+
+ linkpath, err := makeLongAbsPath(filepath.Join(w.root, name))
+ if err != nil {
+ return err
+ }
+
+ linktarget, err := makeLongAbsPath(filepath.Join(w.root, target))
+ if err != nil {
+ return err
+ }
+
+ return os.Link(linktarget, linkpath)
+}
+
+func (w *baseLayerWriter) Remove(name string) error {
+ return errors.New("base layer cannot have tombstones")
+}
+
+func (w *baseLayerWriter) Write(b []byte) (int, error) {
+ n, err := w.bw.Write(b)
+ if err != nil {
+ w.err = err
+ }
+ return n, err
+}
+
+func (w *baseLayerWriter) Close() error {
+ err := w.closeCurrentFile()
+ if err != nil {
+ return err
+ }
+ if w.err == nil {
+ // Restore the file times of all the directories, since they may have
+ // been modified by creating child directories.
+ err = reapplyDirectoryTimes(w.dirInfo)
+ if err != nil {
+ return err
+ }
+
+ err = ProcessBaseLayer(w.root)
+ if err != nil {
+ return err
+ }
+
+ if w.hasUtilityVM {
+ err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM"))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return w.err
+}
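+
+// Editor's note: an illustrative sketch, not part of the vendored source, of
+// the writer's expected call sequence (how the concrete writer is obtained is
+// out of scope here):
+//
+//	w.Add(`Files`, dirFileInfo)  // parent directories first
+//	w.Add(`Files\hello.txt`, fi) // Add implicitly closes the previous file
+//	w.Write(backupStreamBytes)   // backup-stream data for the current file
+//	w.Close()                    // reapplies dir times, runs ProcessBaseLayer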
diff --git a/vendor/github.com/Microsoft/hcsshim/callback.go b/vendor/github.com/Microsoft/hcsshim/callback.go
new file mode 100644
index 000000000..e8c2b00c8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/callback.go
@@ -0,0 +1,79 @@
+package hcsshim
+
+import (
+ "sync"
+ "syscall"
+)
+
+var (
+ nextCallback uintptr
+ callbackMap = map[uintptr]*notifcationWatcherContext{}
+ callbackMapLock = sync.RWMutex{}
+
+ notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
+
+ // Notifications for HCS_SYSTEM handles
+ hcsNotificationSystemExited hcsNotification = 0x00000001
+ hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
+ hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
+ hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
+ hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
+
+ // Notifications for HCS_PROCESS handles
+ hcsNotificationProcessExited hcsNotification = 0x00010000
+
+ // Common notifications
+ hcsNotificationInvalid hcsNotification = 0x00000000
+ hcsNotificationServiceDisconnect hcsNotification = 0x01000000
+)
+
+type hcsNotification uint32
+type notificationChannel chan error
+
+type notifcationWatcherContext struct {
+ channels notificationChannels
+ handle hcsCallback
+}
+
+type notificationChannels map[hcsNotification]notificationChannel
+
+func newChannels() notificationChannels {
+ channels := make(notificationChannels)
+
+ channels[hcsNotificationSystemExited] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
+ channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
+ channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
+ return channels
+}
+
+func closeChannels(channels notificationChannels) {
+ close(channels[hcsNotificationSystemExited])
+ close(channels[hcsNotificationSystemCreateCompleted])
+ close(channels[hcsNotificationSystemStartCompleted])
+ close(channels[hcsNotificationSystemPauseCompleted])
+ close(channels[hcsNotificationSystemResumeCompleted])
+ close(channels[hcsNotificationProcessExited])
+ close(channels[hcsNotificationServiceDisconnect])
+}
+
+func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
+ var result error
+ if int32(notificationStatus) < 0 {
+ result = syscall.Errno(win32FromHresult(notificationStatus))
+ }
+
+ callbackMapLock.RLock()
+ context := callbackMap[callbackNumber]
+ callbackMapLock.RUnlock()
+
+ if context == nil {
+ return 0
+ }
+
+ context.channels[notificationType] <- result
+
+ return 0
+}
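+
+// Editor's note: a sketch, not part of the vendored source, of the consumer
+// side of this mechanism (waitForNotification elsewhere in this package plays
+// this role):
+//
+//	callbackMapLock.RLock()
+//	ch := callbackMap[callbackNumber].channels[hcsNotificationSystemExited]
+//	callbackMapLock.RUnlock()
+//	err := <-ch // delivered by notificationWatcher from the HCS C thread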
diff --git a/vendor/github.com/Microsoft/hcsshim/cgo.go b/vendor/github.com/Microsoft/hcsshim/cgo.go
new file mode 100644
index 000000000..200333233
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/cgo.go
@@ -0,0 +1,7 @@
+package hcsshim
+
+import "C"
+
+// This import is needed to make the library compile with cgo. hcsshim only
+// works with cgo because callbacks from HCS come back on a C thread, which is
+// not supported without cgo. See https://github.com/golang/go/issues/10973
diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go
new file mode 100644
index 000000000..b924d39f4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/container.go
@@ -0,0 +1,794 @@
+package hcsshim
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ defaultTimeout = time.Minute * 4
+)
+
+const (
+ pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
+ statisticsQuery = `{ "PropertyTypes" : ["Statistics"]}`
+ processListQuery = `{ "PropertyTypes" : ["ProcessList"]}`
+ mappedVirtualDiskQuery = `{ "PropertyTypes" : ["MappedVirtualDisk"]}`
+)
+
+type container struct {
+ handleLock sync.RWMutex
+ handle hcsSystem
+ id string
+ callbackNumber uintptr
+}
+
+// ContainerProperties holds the properties for a container and the processes running in that container
+type ContainerProperties struct {
+ ID string `json:"Id"`
+ Name string
+ SystemType string
+ Owner string
+ SiloGUID string `json:"SiloGuid,omitempty"`
+ RuntimeID string `json:"RuntimeId,omitempty"`
+ IsRuntimeTemplate bool `json:",omitempty"`
+ RuntimeImagePath string `json:",omitempty"`
+ Stopped bool `json:",omitempty"`
+ ExitType string `json:",omitempty"`
+ AreUpdatesPending bool `json:",omitempty"`
+ ObRoot string `json:",omitempty"`
+ Statistics Statistics `json:",omitempty"`
+ ProcessList []ProcessListItem `json:",omitempty"`
+ MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
+}
+
+// MemoryStats holds the memory statistics for a container
+type MemoryStats struct {
+ UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
+ UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
+ UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
+}
+
+// ProcessorStats holds the processor statistics for a container
+type ProcessorStats struct {
+ TotalRuntime100ns uint64 `json:",omitempty"`
+ RuntimeUser100ns uint64 `json:",omitempty"`
+ RuntimeKernel100ns uint64 `json:",omitempty"`
+}
+
+// StorageStats holds the storage statistics for a container
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:",omitempty"`
+ ReadSizeBytes uint64 `json:",omitempty"`
+ WriteCountNormalized uint64 `json:",omitempty"`
+ WriteSizeBytes uint64 `json:",omitempty"`
+}
+
+// NetworkStats holds the network statistics for a container
+type NetworkStats struct {
+ BytesReceived uint64 `json:",omitempty"`
+ BytesSent uint64 `json:",omitempty"`
+ PacketsReceived uint64 `json:",omitempty"`
+ PacketsSent uint64 `json:",omitempty"`
+ DroppedPacketsIncoming uint64 `json:",omitempty"`
+ DroppedPacketsOutgoing uint64 `json:",omitempty"`
+ EndpointId string `json:",omitempty"`
+ InstanceId string `json:",omitempty"`
+}
+
+// Statistics is the structure returned by a statistics call on a container
+type Statistics struct {
+ Timestamp time.Time `json:",omitempty"`
+ ContainerStartTime time.Time `json:",omitempty"`
+ Uptime100ns uint64 `json:",omitempty"`
+ Memory MemoryStats `json:",omitempty"`
+ Processor ProcessorStats `json:",omitempty"`
+ Storage StorageStats `json:",omitempty"`
+ Network []NetworkStats `json:",omitempty"`
+}
+
+// ProcessListItem is the structure of an item returned by a ProcessList call on a container
+type ProcessListItem struct {
+ CreateTimestamp time.Time `json:",omitempty"`
+ ImageName string `json:",omitempty"`
+ KernelTime100ns uint64 `json:",omitempty"`
+ MemoryCommitBytes uint64 `json:",omitempty"`
+ MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
+ MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
+ ProcessId uint32 `json:",omitempty"`
+ UserTime100ns uint64 `json:",omitempty"`
+}
+
+// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
+type MappedVirtualDiskController struct {
+ MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
+}
+
+// RequestType is the type of request supported by ModifySystem
+type RequestType string
+
+// ResourceType is the type of resource supported by ModifySystem
+type ResourceType string
+
+// RequestType const
+const (
+ Add RequestType = "Add"
+ Remove RequestType = "Remove"
+ Network ResourceType = "Network"
+)
+
+// ResourceModificationRequestResponse is the structure used to send a request
+// to the container to modify the system. The supported resource type is
+// Network; the supported request types are Add and Remove.
+type ResourceModificationRequestResponse struct {
+ Resource ResourceType `json:"ResourceType"`
+ Data interface{} `json:"Settings"`
+ Request RequestType `json:"RequestType,omitempty"`
+}
+
+// createContainerAdditionalJSON is read from the environment at initialisation
+// time. It allows an environment variable to define additional JSON which is
+// merged into the configuration passed in the CreateContainer call to HCS.
+var createContainerAdditionalJSON string
+
+func init() {
+ createContainerAdditionalJSON = os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")
+}
+
+// CreateContainer creates a new container with the given configuration but does not start it.
+func CreateContainer(id string, c *ContainerConfig) (Container, error) {
+ return createContainerWithJSON(id, c, "")
+}
+
+// CreateContainerWithJSON creates a new container with the given configuration but does not start it.
+// It is identical to CreateContainer except that optional additional JSON can be merged before passing to HCS.
+func CreateContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
+ return createContainerWithJSON(id, c, additionalJSON)
+}
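+
+// Editor's note: a hedged usage sketch, not part of the vendored source; the
+// additional-JSON payload shown is hypothetical:
+//
+//	c, err := CreateContainerWithJSON(id, cfg, `{"Owner":"cri-o"}`)
+//	if err != nil {
+//		return err
+//	}
+//	defer c.Close()
+//	if err := c.Start(); err != nil {
+//		return err
+//	}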
+
+func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
+ operation := "CreateContainer"
+ title := "HCSShim::" + operation
+
+ container := &container{
+ id: id,
+ }
+
+ configurationb, err := json.Marshal(c)
+ if err != nil {
+ return nil, err
+ }
+
+ configuration := string(configurationb)
+ logrus.Debugf(title+" id=%s config=%s", id, configuration)
+
+ // Merge any additional JSON. Priority is given to what is passed in explicitly,
+ // falling back to what's set in the environment.
+ if additionalJSON == "" && createContainerAdditionalJSON != "" {
+ additionalJSON = createContainerAdditionalJSON
+ }
+ if additionalJSON != "" {
+ configurationMap := map[string]interface{}{}
+ if err := json.Unmarshal([]byte(configuration), &configurationMap); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal %s: %s", configuration, err)
+ }
+
+ additionalMap := map[string]interface{}{}
+ if err := json.Unmarshal([]byte(additionalJSON), &additionalMap); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal %s: %s", additionalJSON, err)
+ }
+
+ mergedMap := mergeMaps(additionalMap, configurationMap)
+ mergedJSON, err := json.Marshal(mergedMap)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal merged configuration map %+v: %s", mergedMap, err)
+ }
+
+ configuration = string(mergedJSON)
+ logrus.Debugf(title+" id=%s merged config=%s", id, configuration)
+ }
+
+ var (
+ resultp *uint16
+ identity syscall.Handle
+ )
+ createError := hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp)
+
+ if createError == nil || IsPending(createError) {
+ if err := container.registerCallback(); err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+ }
+
+ err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
+ if err != nil {
+ return nil, makeContainerError(container, operation, configuration, err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle)
+ return container, nil
+}
+
+// mergeMaps recursively merges map `ToMap` into map `fromMap` and returns the
+// result; on conflicting keys, values already present in fromMap take
+// precedence, and values only in ToMap are added.
+// Adapted from http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang
+func mergeMaps(fromMap, ToMap interface{}) interface{} {
+ switch fromMap := fromMap.(type) {
+ case map[string]interface{}:
+ ToMap, ok := ToMap.(map[string]interface{})
+ if !ok {
+ return fromMap
+ }
+ for keyToMap, valueToMap := range ToMap {
+ if valueFromMap, ok := fromMap[keyToMap]; ok {
+ fromMap[keyToMap] = mergeMaps(valueFromMap, valueToMap)
+ } else {
+ fromMap[keyToMap] = valueToMap
+ }
+ }
+ case nil:
+ // merge(nil, map[string]interface{...}) -> map[string]interface{...}
+ ToMap, ok := ToMap.(map[string]interface{})
+ if ok {
+ return ToMap
+ }
+ }
+ return fromMap
+}
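+
+// Editor's note: an illustrative example, not part of the vendored source, of
+// the precedence rule above (keys already present in fromMap win):
+//
+//	from := map[string]interface{}{"a": 1, "b": map[string]interface{}{"x": 1}}
+//	to := map[string]interface{}{"a": 2, "b": map[string]interface{}{"y": 2}, "c": 3}
+//	merged := mergeMaps(from, to)
+//	// merged: map[a:1 b:map[x:1 y:2] c:3]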
+
+// OpenContainer opens an existing container by ID.
+func OpenContainer(id string) (Container, error) {
+ operation := "OpenContainer"
+ title := "HCSShim::" + operation
+ logrus.Debugf(title+" id=%s", id)
+
+ container := &container{
+ id: id,
+ }
+
+ var (
+ handle hcsSystem
+ resultp *uint16
+ )
+ err := hcsOpenComputeSystem(id, &handle, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ container.handle = handle
+
+ if err := container.registerCallback(); err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
+ return container, nil
+}
+
+// GetContainers gets a list of the containers on the system that match the query
+func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
+ operation := "GetContainers"
+ title := "HCSShim::" + operation
+
+ queryb, err := json.Marshal(q)
+ if err != nil {
+ return nil, err
+ }
+
+ query := string(queryb)
+ logrus.Debugf(title+" query=%s", query)
+
+ var (
+ resultp *uint16
+ computeSystemsp *uint16
+ )
+ err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return nil, err
+ }
+
+ if computeSystemsp == nil {
+ return nil, ErrUnexpectedValue
+ }
+ computeSystemsRaw := convertAndFreeCoTaskMemBytes(computeSystemsp)
+ computeSystems := []ContainerProperties{}
+ if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf(title + " succeeded")
+ return computeSystems, nil
+}
+
+// Start synchronously starts the container.
+func (container *container) Start() error {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "Start"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ var resultp *uint16
+ err := hcsStartComputeSystem(container.handle, "", &resultp)
+ err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
+
+// Shutdown requests a container shutdown. If IsPending() on the returned error
+// is true, the container may not actually be shut down until Wait() succeeds.
+func (container *container) Shutdown() error {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "Shutdown"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ var resultp *uint16
+ err := hcsShutdownComputeSystem(container.handle, "", &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
+
+// Terminate requests that a container terminate. If IsPending() on the returned
+// error is true, the container may not actually be shut down until Wait() succeeds.
+func (container *container) Terminate() error {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "Terminate"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ var resultp *uint16
+ err := hcsTerminateComputeSystem(container.handle, "", &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
+
+// Wait synchronously waits for the container to shutdown or terminate.
+func (container *container) Wait() error {
+ operation := "Wait"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
+
+// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
+// If the timeout expires, IsTimeout(err) == true
+func (container *container) WaitTimeout(timeout time.Duration) error {
+ operation := "WaitTimeout"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
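+
+// Editor's note: an illustrative use, not part of the vendored source, of the
+// two wait variants together with the IsTimeout predicate mentioned above:
+//
+//	if err := c.WaitTimeout(30 * time.Second); err != nil {
+//		if IsTimeout(err) {
+//			_ = c.Terminate() // best-effort stop, then wait for exit
+//			err = c.Wait()
+//		}
+//	}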
+
+func (container *container) properties(query string) (*ContainerProperties, error) {
+ var (
+ resultp *uint16
+ propertiesp *uint16
+ )
+ err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return nil, err
+ }
+
+ if propertiesp == nil {
+ return nil, ErrUnexpectedValue
+ }
+ propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
+ properties := &ContainerProperties{}
+ if err := json.Unmarshal(propertiesRaw, properties); err != nil {
+ return nil, err
+ }
+ return properties, nil
+}
+
+// HasPendingUpdates returns true if the container has updates pending to install
+func (container *container) HasPendingUpdates() (bool, error) {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "HasPendingUpdates"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return false, makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ properties, err := container.properties(pendingUpdatesQuery)
+ if err != nil {
+ return false, makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return properties.AreUpdatesPending, nil
+}
+
+// Statistics returns statistics for the container
+func (container *container) Statistics() (Statistics, error) {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "Statistics"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return Statistics{}, makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ properties, err := container.properties(statisticsQuery)
+ if err != nil {
+ return Statistics{}, makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return properties.Statistics, nil
+}
+
+// ProcessList returns an array of ProcessListItems for the container
+func (container *container) ProcessList() ([]ProcessListItem, error) {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "ProcessList"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ properties, err := container.properties(processListQuery)
+ if err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return properties.ProcessList, nil
+}
+
+// MappedVirtualDisks returns a map of the controllers and the disks mapped
+// to a container.
+//
+// Example of JSON returned by the query.
+//{
+// "Id":"1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3_svm",
+// "SystemType":"Container",
+// "RuntimeOsType":"Linux",
+// "RuntimeId":"00000000-0000-0000-0000-000000000000",
+// "State":"Running",
+// "MappedVirtualDiskControllers":{
+// "0":{
+// "MappedVirtualDisks":{
+// "2":{
+// "HostPath":"C:\\lcow\\lcow\\scratch\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3.vhdx",
+// "ContainerPath":"/mnt/gcs/LinuxServiceVM/scratch",
+// "Lun":2,
+// "CreateInUtilityVM":true
+// },
+// "3":{
+// "HostPath":"C:\\lcow\\lcow\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3\\sandbox.vhdx",
+// "Lun":3,
+// "CreateInUtilityVM":true,
+// "AttachOnly":true
+// }
+// }
+// }
+// }
+//}
+func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "MappedVirtualDiskList"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ properties, err := container.properties(mappedVirtualDiskQuery)
+ if err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return properties.MappedVirtualDiskControllers, nil
+}
+
+// Pause pauses the execution of the container. This feature is not enabled in TP5.
+func (container *container) Pause() error {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "Pause"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ var resultp *uint16
+ err := hcsPauseComputeSystem(container.handle, "", &resultp)
+ err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
+
+// Resume resumes the execution of the container. This feature is not enabled in TP5.
+func (container *container) Resume() error {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "Resume"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ if container.handle == 0 {
+ return makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ var resultp *uint16
+ err := hcsResumeComputeSystem(container.handle, "", &resultp)
+ err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
+
+// CreateProcess launches a new process within the container.
+func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "CreateProcess"
+ title := "HCSShim::Container::" + operation
+ var (
+ processInfo hcsProcessInformation
+ processHandle hcsProcess
+ resultp *uint16
+ )
+
+ if container.handle == 0 {
+ return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ // If we are not emulating a console, ignore any console size passed to us
+ if !c.EmulateConsole {
+ c.ConsoleSize[0] = 0
+ c.ConsoleSize[1] = 0
+ }
+
+ configurationb, err := json.Marshal(c)
+ if err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ configuration := string(configurationb)
+ logrus.Debugf(title+" id=%s config=%s", container.id, configuration)
+
+ err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return nil, makeContainerError(container, operation, configuration, err)
+ }
+
+ process := &process{
+ handle: processHandle,
+ processID: int(processInfo.ProcessId),
+ container: container,
+ cachedPipes: &cachedPipes{
+ stdIn: processInfo.StdInput,
+ stdOut: processInfo.StdOutput,
+ stdErr: processInfo.StdError,
+ },
+ }
+
+ if err := process.registerCallback(); err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
+ return process, nil
+}
+
+// OpenProcess gets an interface to an existing process within the container.
+func (container *container) OpenProcess(pid int) (Process, error) {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "OpenProcess"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
+ var (
+ processHandle hcsProcess
+ resultp *uint16
+ )
+
+ if container.handle == 0 {
+ return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ process := &process{
+ handle: processHandle,
+ processID: pid,
+ container: container,
+ }
+
+ if err := process.registerCallback(); err != nil {
+ return nil, makeContainerError(container, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded id=%s processid=%s", container.id, process.processID)
+ return process, nil
+}
+
+// Close cleans up any state associated with the container but does not terminate or wait for it.
+func (container *container) Close() error {
+ container.handleLock.Lock()
+ defer container.handleLock.Unlock()
+ operation := "Close"
+ title := "HCSShim::Container::" + operation
+ logrus.Debugf(title+" id=%s", container.id)
+
+ // Don't double free this
+ if container.handle == 0 {
+ return nil
+ }
+
+ if err := container.unregisterCallback(); err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ if err := hcsCloseComputeSystem(container.handle); err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+
+ container.handle = 0
+
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
+
+func (container *container) registerCallback() error {
+ context := &notifcationWatcherContext{
+ channels: newChannels(),
+ }
+
+ callbackMapLock.Lock()
+ callbackNumber := nextCallback
+ nextCallback++
+ callbackMap[callbackNumber] = context
+ callbackMapLock.Unlock()
+
+ var callbackHandle hcsCallback
+ err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
+ if err != nil {
+ return err
+ }
+ context.handle = callbackHandle
+ container.callbackNumber = callbackNumber
+
+ return nil
+}
+
+func (container *container) unregisterCallback() error {
+ callbackNumber := container.callbackNumber
+
+ callbackMapLock.RLock()
+ context := callbackMap[callbackNumber]
+ callbackMapLock.RUnlock()
+
+ if context == nil {
+ return nil
+ }
+
+ handle := context.handle
+
+ if handle == 0 {
+ return nil
+ }
+
+ // hcsUnregisterComputeSystemCallback has its own synchronization
+ // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
+ err := hcsUnregisterComputeSystemCallback(handle)
+ if err != nil {
+ return err
+ }
+
+ closeChannels(context.channels)
+
+ callbackMapLock.Lock()
+ callbackMap[callbackNumber] = nil
+ callbackMapLock.Unlock()
+
+ handle = 0
+
+ return nil
+}
+
+// Modify sends a request to HCS to modify the system
+func (container *container) Modify(config *ResourceModificationRequestResponse) error {
+ container.handleLock.RLock()
+ defer container.handleLock.RUnlock()
+ operation := "Modify"
+ title := "HCSShim::Container::" + operation
+
+ if container.handle == 0 {
+ return makeContainerError(container, operation, "", ErrAlreadyClosed)
+ }
+
+ requestJSON, err := json.Marshal(config)
+ if err != nil {
+ return err
+ }
+
+ requestString := string(requestJSON)
+ logrus.Debugf(title+" id=%s request=%s", container.id, requestString)
+
+ var resultp *uint16
+ err = hcsModifyComputeSystem(container.handle, requestString, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return makeContainerError(container, operation, "", err)
+ }
+ logrus.Debugf(title+" succeeded id=%s", container.id)
+ return nil
+}
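+
+// Editor's note: an illustrative Modify request, not part of the vendored
+// source, using the Add/Network constants declared above; endpointSettings is
+// a hypothetical placeholder for the "Settings" payload:
+//
+//	req := &ResourceModificationRequestResponse{
+//		Resource: Network,
+//		Request:  Add,
+//		Data:     endpointSettings,
+//	}
+//	err := c.Modify(req)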
diff --git a/vendor/github.com/Microsoft/hcsshim/createlayer.go b/vendor/github.com/Microsoft/hcsshim/createlayer.go
new file mode 100644
index 000000000..035d9c394
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/createlayer.go
@@ -0,0 +1,27 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// CreateLayer creates a new, empty, read-only layer on the filesystem based on
+// the parent layer provided.
+func CreateLayer(info DriverInfo, id, parent string) error {
+ title := "hcsshim::CreateLayer "
+ logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = createLayer(&infop, id, parent)
+ if err != nil {
+ err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
+ return nil
+}
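A minimal sketch of calling CreateLayer (not part of the vendored source); the DriverInfo values and layer ids are illustrative, with FilterDriver assumed to be the filter-based flavour constant from this package:

package main

import "github.com/Microsoft/hcsshim"

func newReadOnlyLayer() error {
	info := hcsshim.DriverInfo{
		HomeDir: `C:\ProgramData\layers`, // assumed layer store directory
		Flavour: hcsshim.FilterDriver,
	}
	// Creates an empty read-only layer on top of the named parent layer.
	return hcsshim.CreateLayer(info, "new-layer-id", "parent-layer-id")
}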
diff --git a/vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go b/vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go
new file mode 100644
index 000000000..7a6a8854c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go
@@ -0,0 +1,35 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// CreateSandboxLayer creates and populates a new read-write layer for use by a container.
+// This requires both the id of the direct parent layer, as well as the full list
+// of paths to all parent layers up to the base (and including the direct parent
+// whose id was provided).
+func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
+ title := "hcsshim::CreateSandboxLayer "
+ logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)
+
+ // Generate layer descriptors
+ layers, err := layerPathsToDescriptors(parentLayerPaths)
+ if err != nil {
+ return err
+ }
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = createSandboxLayer(&infop, layerId, parentId, layers)
+ if err != nil {
+ err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
+ return nil
+}
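A minimal sketch of calling CreateSandboxLayer (not part of the vendored source); the ids and paths are illustrative, and the path list is assumed to be ordered from the direct parent down to the base layer:

package main

import "github.com/Microsoft/hcsshim"

func newSandbox(info hcsshim.DriverInfo) error {
	parentLayerPaths := []string{
		`C:\ProgramData\layers\app-layer`,  // direct parent (id "app-layer")
		`C:\ProgramData\layers\base-layer`, // base layer
	}
	return hcsshim.CreateSandboxLayer(info, "sandbox-id", "app-layer", parentLayerPaths)
}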
diff --git a/vendor/github.com/Microsoft/hcsshim/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/deactivatelayer.go
new file mode 100644
index 000000000..fd785030f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/deactivatelayer.go
@@ -0,0 +1,26 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
+func DeactivateLayer(info DriverInfo, id string) error {
+ title := "hcsshim::DeactivateLayer "
+ logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = deactivateLayer(&infop, id)
+ if err != nil {
+ err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/destroylayer.go
new file mode 100644
index 000000000..b1e3b89fc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/destroylayer.go
@@ -0,0 +1,27 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// DestroyLayer will remove the on-disk files representing the layer with the given
+// id, including that layer's containing folder, if any.
+func DestroyLayer(info DriverInfo, id string) error {
+ title := "hcsshim::DestroyLayer "
+ logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = destroyLayer(&infop, id)
+ if err != nil {
+ err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go
new file mode 100644
index 000000000..d2f9cc8bd
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/errors.go
@@ -0,0 +1,239 @@
+package hcsshim
+
+import (
+ "errors"
+ "fmt"
+ "syscall"
+)
+
+var (
+ // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
+ ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
+
+ // ErrElementNotFound is an error encountered when the object being referenced does not exist
+ ErrElementNotFound = syscall.Errno(0x490)
+
+ // ErrNotSupported is an error encountered when the requested operation is not supported
+ ErrNotSupported = syscall.Errno(0x32)
+
+ // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
+ // (Win32 ERROR_INVALID_DATA; surfaces as HRESULT 0x8007000d, decimal -2147024883)
+ ErrInvalidData = syscall.Errno(0xd)
+
+ // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
+ ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
+
+ // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
+ ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
+
+ // ErrInvalidNotificationType is an error encountered when an invalid notification type is used
+ ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
+
+ // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
+ ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
+
+ // ErrTimeout is an error encountered when waiting on a notification times out
+ ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
+
+ // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
+ // a different expected notification
+ ErrUnexpectedContainerExit = errors.New("unexpected container exit")
+
+ // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
+ // is lost while waiting for a notification
+ ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
+
+ // ErrUnexpectedValue is an error encountered when hcs returns an invalid value
+ ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
+
+ // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
+ ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
+
+ // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
+ ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
+
+ // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
+ ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
+
+ // ErrProcNotFound is an error encountered when the process cannot be found
+ ErrProcNotFound = syscall.Errno(0x7f)
+
+ // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
+ // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
+ ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
+
+ // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
+ ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
+
+ // ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
+ ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
+
+ // ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
+ ErrPlatformNotSupported = errors.New("unsupported platform request")
+)
+
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+ Process *process
+ Operation string
+ ExtraInfo string
+ Err error
+}
+
+// ContainerError is an error encountered in HCS during an operation on a Container object
+type ContainerError struct {
+ Container *container
+ Operation string
+ ExtraInfo string
+ Err error
+}
+
+func (e *ContainerError) Error() string {
+ if e == nil {
+ return "<nil>"
+ }
+
+ if e.Container == nil {
+ return "unexpected nil container for error: " + e.Err.Error()
+ }
+
+ s := "container " + e.Container.id
+
+ if e.Operation != "" {
+ s += " encountered an error during " + e.Operation
+ }
+
+ switch e.Err.(type) {
+ case nil:
+ break
+ case syscall.Errno:
+ s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
+ default:
+ s += fmt.Sprintf(": %s", e.Err.Error())
+ }
+
+ if e.ExtraInfo != "" {
+ s += " extra info: " + e.ExtraInfo
+ }
+
+ return s
+}
+
+func makeContainerError(container *container, operation string, extraInfo string, err error) error {
+ // Don't double wrap errors
+ if _, ok := err.(*ContainerError); ok {
+ return err
+ }
+ containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err}
+ return containerError
+}
+
+func (e *ProcessError) Error() string {
+ if e == nil {
+ return "<nil>"
+ }
+
+ if e.Process == nil {
+ return "Unexpected nil process for error: " + e.Err.Error()
+ }
+
+ s := fmt.Sprintf("process %d", e.Process.processID)
+
+ if e.Process.container != nil {
+ s += " in container " + e.Process.container.id
+ }
+
+ if e.Operation != "" {
+ s += " encountered an error during " + e.Operation
+ }
+
+ switch e.Err.(type) {
+ case nil:
+ break
+ case syscall.Errno:
+ s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
+ default:
+ s += fmt.Sprintf(": %s", e.Err.Error())
+ }
+
+ return s
+}
+
+func makeProcessError(process *process, operation string, extraInfo string, err error) error {
+ // Don't double wrap errors
+ if _, ok := err.(*ProcessError); ok {
+ return err
+ }
+ processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err}
+ return processError
+}
+
+// IsNotExist checks if an error is caused by the Container or Process not existing.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+func IsNotExist(err error) bool {
+ err = getInnerError(err)
+ return err == ErrComputeSystemDoesNotExist ||
+ err == ErrElementNotFound ||
+ err == ErrProcNotFound
+}
+
+// IsAlreadyClosed checks if an error is caused by the Container or Process having been
+// already closed by a call to the Close() method.
+func IsAlreadyClosed(err error) bool {
+ err = getInnerError(err)
+ return err == ErrAlreadyClosed
+}
+
+// IsPending returns a boolean indicating whether the error is caused by
+// the requested operation still being completed in the background.
+func IsPending(err error) bool {
+ err = getInnerError(err)
+ return err == ErrVmcomputeOperationPending
+}
+
+// IsTimeout returns a boolean indicating whether the error is caused by
+// a timeout waiting for the operation to complete.
+func IsTimeout(err error) bool {
+ err = getInnerError(err)
+ return err == ErrTimeout
+}
+
+// IsAlreadyStopped returns a boolean indicating whether the error is caused by
+// a Container or Process being already stopped.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+func IsAlreadyStopped(err error) bool {
+ err = getInnerError(err)
+ return err == ErrVmcomputeAlreadyStopped ||
+ err == ErrElementNotFound ||
+ err == ErrProcNotFound
+}
+
+// IsNotSupported returns a boolean indicating whether the error is caused by
+// an unsupported platform request.
+// Note: Currently an unsupported platform request can surface as any of
+// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or
+// ErrVmcomputeUnknownMessage returned from the platform
+func IsNotSupported(err error) bool {
+ err = getInnerError(err)
+ // If Platform doesn't recognize or support the request sent, below errors are seen
+ return err == ErrVmcomputeInvalidJSON ||
+ err == ErrInvalidData ||
+ err == ErrNotSupported ||
+ err == ErrVmcomputeUnknownMessage
+}
+
+func getInnerError(err error) error {
+ switch pe := err.(type) {
+ case nil:
+ return nil
+ case *ContainerError:
+ err = pe.Err
+ case *ProcessError:
+ err = pe.Err
+ }
+ return err
+}
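A minimal sketch (not part of the vendored source) of using the classification helpers above to make termination idempotent; Terminate and WaitTimeout are assumed to be part of this package's Container interface:

package main

import (
	"time"

	"github.com/Microsoft/hcsshim"
)

func terminateQuietly(c hcsshim.Container) error {
	err := c.Terminate()
	switch {
	case err == nil:
		return nil
	case hcsshim.IsPending(err):
		// Termination is completing asynchronously; wait for it to finish.
		return c.WaitTimeout(30 * time.Second)
	case hcsshim.IsAlreadyStopped(err), hcsshim.IsNotExist(err):
		// Racing with a normal exit or external cleanup is not an error here.
		return nil
	default:
		return err
	}
}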
diff --git a/vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go b/vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go
new file mode 100644
index 000000000..6946c6a84
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go
@@ -0,0 +1,26 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// ExpandSandboxSize expands the size of a layer to at least size bytes.
+func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
+ title := "hcsshim::ExpandSandboxSize "
+ logrus.Debugf(title+"layerId=%s size=%d", layerId, size)
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = expandSandboxSize(&infop, layerId, size)
+ if err != nil {
+ err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size)
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/exportlayer.go
new file mode 100644
index 000000000..d7025f20b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/exportlayer.go
@@ -0,0 +1,156 @@
+package hcsshim
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "syscall"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/sirupsen/logrus"
+)
+
+// ExportLayer will create a folder at exportFolderPath and fill that folder with
+// the transport format version of the layer identified by layerId. This transport
+// format includes any metadata required for later importing the layer (using
+// ImportLayer), and requires the full list of parent layer paths in order to
+// perform the export.
+func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
+ title := "hcsshim::ExportLayer "
+ logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath)
+
+ // Generate layer descriptors
+ layers, err := layerPathsToDescriptors(parentLayerPaths)
+ if err != nil {
+ return err
+ }
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = exportLayer(&infop, layerId, exportFolderPath, layers)
+ if err != nil {
+ err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath)
+ return nil
+}
+
+type LayerReader interface {
+ Next() (string, int64, *winio.FileBasicInfo, error)
+ Read(b []byte) (int, error)
+ Close() error
+}
+
+// FilterLayerReader provides an interface for extracting the contents of an on-disk layer.
+type FilterLayerReader struct {
+ context uintptr
+}
+
+// Next reads the next available file from a layer, ensuring that parent directories are always read
+// before child files and directories.
+//
+// Next returns the file's relative path, size, and basic file metadata. Read() should be used to
+// extract a Win32 backup stream with the remainder of the metadata and the data.
+func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) {
+ var fileNamep *uint16
+ fileInfo := &winio.FileBasicInfo{}
+ var deleted uint32
+ var fileSize int64
+ err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted)
+ if err != nil {
+ if err == syscall.ERROR_NO_MORE_FILES {
+ err = io.EOF
+ } else {
+ err = makeError(err, "ExportLayerNext", "")
+ }
+ return "", 0, nil, err
+ }
+ fileName := convertAndFreeCoTaskMemString(fileNamep)
+ if deleted != 0 {
+ fileInfo = nil
+ }
+ if fileName[0] == '\\' {
+ fileName = fileName[1:]
+ }
+ return fileName, fileSize, fileInfo, nil
+}
+
+// Read reads from the current file's Win32 backup stream.
+func (r *FilterLayerReader) Read(b []byte) (int, error) {
+ var bytesRead uint32
+ err := exportLayerRead(r.context, b, &bytesRead)
+ if err != nil {
+ return 0, makeError(err, "ExportLayerRead", "")
+ }
+ if bytesRead == 0 {
+ return 0, io.EOF
+ }
+ return int(bytesRead), nil
+}
+
+// Close frees resources associated with the layer reader. It will return an
+// error if there was an error while reading the layer or if the layer was not
+// completely read.
+func (r *FilterLayerReader) Close() (err error) {
+ if r.context != 0 {
+ err = exportLayerEnd(r.context)
+ if err != nil {
+ err = makeError(err, "ExportLayerEnd", "")
+ }
+ r.context = 0
+ }
+ return
+}
+
+// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
+// The caller must have taken the SeBackupPrivilege privilege
+// to call this and any methods on the resulting LayerReader.
+func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
+ if procExportLayerBegin.Find() != nil {
+ // The new layer reader is not available on this Windows build. Fall back to the
+ // legacy export code path.
+ path, err := ioutil.TempDir("", "hcs")
+ if err != nil {
+ return nil, err
+ }
+ err = ExportLayer(info, layerID, path, parentLayerPaths)
+ if err != nil {
+ os.RemoveAll(path)
+ return nil, err
+ }
+ return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil
+ }
+
+ layers, err := layerPathsToDescriptors(parentLayerPaths)
+ if err != nil {
+ return nil, err
+ }
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ return nil, err
+ }
+ r := &FilterLayerReader{}
+ err = exportLayerBegin(&infop, layerID, layers, &r.context)
+ if err != nil {
+ return nil, makeError(err, "ExportLayerBegin", "")
+ }
+ return r, err
+}
+
+type legacyLayerReaderWrapper struct {
+ *legacyLayerReader
+}
+
+func (r *legacyLayerReaderWrapper) Close() error {
+ err := r.legacyLayerReader.Close()
+ os.RemoveAll(r.root)
+ return err
+}
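A minimal sketch (not part of the vendored source) of draining a layer through the LayerReader interface above; acquiring SeBackupPrivilege is assumed to be handled by the caller, for example with go-winio's privilege helpers:

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/Microsoft/hcsshim"
)

func walkLayer(info hcsshim.DriverInfo, layerID string, parents []string) error {
	r, err := hcsshim.NewLayerReader(info, layerID, parents)
	if err != nil {
		return err
	}
	defer r.Close()
	for {
		name, _, fileInfo, err := r.Next()
		if err == io.EOF {
			return nil // layer fully read
		}
		if err != nil {
			return err
		}
		if fileInfo == nil {
			// A nil fileInfo marks a file deleted relative to a parent layer.
			fmt.Println("tombstone:", name)
			continue
		}
		// Discard this file's Win32 backup stream; Read returns io.EOF per file.
		if _, err := io.Copy(ioutil.Discard, r); err != nil {
			return err
		}
	}
}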
diff --git a/vendor/github.com/Microsoft/hcsshim/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/getlayermountpath.go
new file mode 100644
index 000000000..89f8079d0
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/getlayermountpath.go
@@ -0,0 +1,55 @@
+package hcsshim
+
+import (
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+)
+
+// GetLayerMountPath will look for a mounted layer with the given id and return
+// the path at which that layer can be accessed. This path may be a volume path
+// if the layer is a mounted read-write layer, otherwise it is expected to be the
+// folder path at which the layer is stored.
+func GetLayerMountPath(info DriverInfo, id string) (string, error) {
+ title := "hcsshim::GetLayerMountPath "
+ logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return "", err
+ }
+
+ // Zero-valued; the first call passes a zero length to query the
+ // required buffer size.
+ var mountPathLength uintptr
+
+ // Call the procedure itself.
+ logrus.Debugf("Calling proc (1)")
+ err = getLayerMountPath(&infop, id, &mountPathLength, nil)
+ if err != nil {
+ err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour)
+ logrus.Error(err)
+ return "", err
+ }
+
+ // Allocate a mount path of the returned length.
+ if mountPathLength == 0 {
+ return "", nil
+ }
+ mountPathp := make([]uint16, mountPathLength)
+ mountPathp[0] = 0
+
+ // Call the procedure again
+ logrus.Debugf("Calling proc (2)")
+ err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0])
+ if err != nil {
+ err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour)
+ logrus.Error(err)
+ return "", err
+ }
+
+ path := syscall.UTF16ToString(mountPathp[0:])
+ logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path)
+ return path, nil
+}
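A minimal sketch (not part of the vendored source) of resolving where a read-write layer is mounted; ActivateLayer and PrepareLayer are assumed from this package's other exported layer calls:

package main

import "github.com/Microsoft/hcsshim"

func mountAndResolve(info hcsshim.DriverInfo, id string, parents []string) (string, error) {
	if err := hcsshim.ActivateLayer(info, id); err != nil {
		return "", err
	}
	if err := hcsshim.PrepareLayer(info, id, parents); err != nil {
		return "", err
	}
	// For a mounted read-write layer this returns a volume path rather
	// than a plain folder path.
	return hcsshim.GetLayerMountPath(info, id)
}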
diff --git a/vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go
new file mode 100644
index 000000000..05d3d9532
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go
@@ -0,0 +1,22 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// GetSharedBaseImages will enumerate the images stored in the common central
+// image store and return descriptive info about those images for the purpose
+// of registering them with the graphdriver, graph, and tagstore.
+func GetSharedBaseImages() (imageData string, err error) {
+ title := "hcsshim::GetSharedBaseImages "
+
+ logrus.Debugf("Calling proc")
+ var buffer *uint16
+ err = getBaseImages(&buffer)
+ if err != nil {
+ err = makeError(err, title, "")
+ logrus.Error(err)
+ return
+ }
+ imageData = convertAndFreeCoTaskMemString(buffer)
+ logrus.Debugf(title+" - succeeded output=%s", imageData)
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/guid.go b/vendor/github.com/Microsoft/hcsshim/guid.go
new file mode 100644
index 000000000..620aba123
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/guid.go
@@ -0,0 +1,19 @@
+package hcsshim
+
+import (
+ "crypto/sha1"
+ "fmt"
+)
+
+type GUID [16]byte
+
+func NewGUID(source string) *GUID {
+ h := sha1.Sum([]byte(source))
+ var g GUID
+ copy(g[0:], h[0:16])
+ return &g
+}
+
+func (g *GUID) ToString() string {
+ return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
+}
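NewGUID derives the GUID from a SHA-1 hash, so the mapping from name to GUID is deterministic, and ToString renders the mixed-endian layout Windows uses for the first three GUID fields. A small sketch (not part of the vendored source):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	g := hcsshim.NewGUID("my-container-network")
	fmt.Println(g.ToString()) // identical output for the same name, every run
}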
diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
new file mode 100644
index 000000000..236ba1fa3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
@@ -0,0 +1,166 @@
+// Shim for the Host Compute Service (HCS) to manage Windows Server
+// containers and Hyper-V containers.
+
+package hcsshim
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+)
+
+//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go
+
+//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
+//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
+
+//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
+//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
+//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
+//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
+//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
+//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
+//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
+//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
+//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
+//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
+//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
+//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
+//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
+//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
+//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
+//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
+//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
+
+//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
+//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
+//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
+//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?
+
+//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
+//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
+//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
+//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?
+
+//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
+//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
+//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
+//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
+//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
+//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
+//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
+//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
+//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
+//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
+//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
+//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
+//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
+
+//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
+//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
+//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
+//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
+//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
+//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
+//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
+//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
+//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
+//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
+
+//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?
+
+//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
+
+const (
+ // Specific user-visible exit codes
+ WaitErrExecFailed = 32767
+
+ ERROR_GEN_FAILURE = syscall.Errno(31)
+ ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
+ WSAEINVAL = syscall.Errno(10022)
+
+ // Timeout on wait calls
+ TimeoutInfinite = 0xFFFFFFFF
+)
+
+type HcsError struct {
+ title string
+ rest string
+ Err error
+}
+
+type hcsSystem syscall.Handle
+type hcsProcess syscall.Handle
+type hcsCallback syscall.Handle
+
+type hcsProcessInformation struct {
+ ProcessId uint32
+ Reserved uint32
+ StdInput syscall.Handle
+ StdOutput syscall.Handle
+ StdError syscall.Handle
+}
+
+func makeError(err error, title, rest string) error {
+ // Pass through DLL errors directly since they do not originate from HCS.
+ if _, ok := err.(*syscall.DLLError); ok {
+ return err
+ }
+ return &HcsError{title, rest, err}
+}
+
+func makeErrorf(err error, title, format string, a ...interface{}) error {
+ return makeError(err, title, fmt.Sprintf(format, a...))
+}
+
+func win32FromError(err error) uint32 {
+ if herr, ok := err.(*HcsError); ok {
+ return win32FromError(herr.Err)
+ }
+ if code, ok := err.(syscall.Errno); ok {
+ return uint32(code)
+ }
+ return uint32(ERROR_GEN_FAILURE)
+}
+
+func win32FromHresult(hr uintptr) uintptr {
+ if hr&0x1fff0000 == 0x00070000 {
+ return hr & 0xffff
+ }
+ return hr
+}
+
+func (e *HcsError) Error() string {
+ s := e.title
+ if len(s) > 0 && s[len(s)-1] != ' ' {
+ s += " "
+ }
+ s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
+ if e.rest != "" {
+ if e.rest[0] != ' ' {
+ s += " "
+ }
+ s += e.rest
+ }
+ return s
+}
+
+func convertAndFreeCoTaskMemString(buffer *uint16) string {
+ str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
+ coTaskMemFree(unsafe.Pointer(buffer))
+ return str
+}
+
+func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
+ return []byte(convertAndFreeCoTaskMemString(buffer))
+}
+
+func processHcsResult(err error, resultp *uint16) error {
+ if resultp != nil {
+ result := convertAndFreeCoTaskMemString(resultp)
+ logrus.Debugf("Result: %s", result)
+ }
+ return err
+}
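A minimal sketch (not part of the vendored source) of recovering the underlying Win32 code from a returned error, mirroring what win32FromError does internally; only the Err field of HcsError is exported:

package main

import (
	"fmt"
	"syscall"

	"github.com/Microsoft/hcsshim"
)

func logWin32(err error) {
	if herr, ok := err.(*hcsshim.HcsError); ok {
		if errno, ok := herr.Err.(syscall.Errno); ok {
			fmt.Printf("HCS failure: Win32 error 0x%x\n", uint32(errno))
		}
	}
}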
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
new file mode 100644
index 000000000..92afc0c24
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
@@ -0,0 +1,318 @@
+package hcsshim
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+
+ "github.com/sirupsen/logrus"
+)
+
+// HNSEndpoint represents a network endpoint in HNS
+type HNSEndpoint struct {
+ Id string `json:"ID,omitempty"`
+ Name string `json:",omitempty"`
+ VirtualNetwork string `json:",omitempty"`
+ VirtualNetworkName string `json:",omitempty"`
+ Policies []json.RawMessage `json:",omitempty"`
+ MacAddress string `json:",omitempty"`
+ IPAddress net.IP `json:",omitempty"`
+ DNSSuffix string `json:",omitempty"`
+ DNSServerList string `json:",omitempty"`
+ GatewayAddress string `json:",omitempty"`
+ EnableInternalDNS bool `json:",omitempty"`
+ DisableICC bool `json:",omitempty"`
+ PrefixLength uint8 `json:",omitempty"`
+ IsRemoteEndpoint bool `json:",omitempty"`
+}
+
+// SystemType represents the type of the system on which actions are done
+type SystemType string
+
+// SystemType const
+const (
+ ContainerType SystemType = "Container"
+ VirtualMachineType SystemType = "VirtualMachine"
+ HostType SystemType = "Host"
+)
+
+// EndpointAttachDetachRequest is the structure used to send a request to the container to modify the system.
+// The supported resource type is Network and the supported request types are Add/Remove.
+type EndpointAttachDetachRequest struct {
+ ContainerID string `json:"ContainerId,omitempty"`
+ SystemType SystemType `json:"SystemType"`
+ CompartmentID uint16 `json:"CompartmentId,omitempty"`
+ VirtualNICName string `json:"VirtualNicName,omitempty"`
+}
+
+// EndpointResquestResponse is the object used to capture the response to an endpoint request
+type EndpointResquestResponse struct {
+ Success bool
+ Error string
+}
+
+// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
+func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
+ endpoint := &HNSEndpoint{}
+ err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return endpoint, nil
+}
+
+// HNSListEndpointRequest makes a HNS call to query the list of available endpoints
+func HNSListEndpointRequest() ([]HNSEndpoint, error) {
+ var endpoint []HNSEndpoint
+ err := hnsCall("GET", "/endpoints/", "", &endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return endpoint, nil
+}
+
+// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container
+func HotAttachEndpoint(containerID string, endpointID string) error {
+ return modifyNetworkEndpoint(containerID, endpointID, Add)
+}
+
+// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container
+func HotDetachEndpoint(containerID string, endpointID string) error {
+ return modifyNetworkEndpoint(containerID, endpointID, Remove)
+}
+
+// modifyContainer modifies the container corresponding to the given id by sending the supplied request
+func modifyContainer(id string, request *ResourceModificationRequestResponse) error {
+ container, err := OpenContainer(id)
+ if err != nil {
+ if IsNotExist(err) {
+ return ErrComputeSystemDoesNotExist
+ }
+ return getInnerError(err)
+ }
+ defer container.Close()
+ err = container.Modify(request)
+ if err != nil {
+ if IsNotSupported(err) {
+ return ErrPlatformNotSupported
+ }
+ return getInnerError(err)
+ }
+
+ return nil
+}
+
+func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error {
+ requestMessage := &ResourceModificationRequestResponse{
+ Resource: Network,
+ Request: request,
+ Data: endpointID,
+ }
+ err := modifyContainer(containerID, requestMessage)
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// GetHNSEndpointByID gets the Endpoint by ID
+func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
+ return HNSEndpointRequest("GET", endpointID, "")
+}
+
+// GetHNSEndpointByName gets the endpoint filtered by Name
+func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
+ hnsResponse, err := HNSListEndpointRequest()
+ if err != nil {
+ return nil, err
+ }
+ for _, hnsEndpoint := range hnsResponse {
+ if hnsEndpoint.Name == endpointName {
+ return &hnsEndpoint, nil
+ }
+ }
+ return nil, fmt.Errorf("Endpoint %v not found", endpointName)
+}
+
+// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
+func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
+ operation := "Create"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+
+ jsonString, err := json.Marshal(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ return HNSEndpointRequest("POST", "", string(jsonString))
+}
+
+// Delete Endpoint by sending EndpointRequest to HNS
+func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
+ operation := "Delete"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+
+ return HNSEndpointRequest("DELETE", endpoint.Id, "")
+}
+
+// Update Endpoint
+func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
+ operation := "Update"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+ jsonString, err := json.Marshal(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint)
+
+ return endpoint, err
+}
+
+// ContainerHotAttach attaches an endpoint to a running container
+func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error {
+ operation := "ContainerHotAttach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
+
+ return modifyNetworkEndpoint(containerID, endpoint.Id, Add)
+}
+
+// ContainerHotDetach detaches an endpoint from a running container
+func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error {
+ operation := "ContainerHotDetach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
+
+ return modifyNetworkEndpoint(containerID, endpoint.Id, Remove)
+}
+
+// ApplyACLPolicy applies an ACL Policy on the Endpoint
+func (endpoint *HNSEndpoint) ApplyACLPolicy(policy *ACLPolicy) error {
+ operation := "ApplyACLPolicy"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+
+ jsonString, err := json.Marshal(policy)
+ if err != nil {
+ return err
+ }
+ // Guard against an empty policy list; indexing Policies[0] directly
+ // panics when no policy has been set on the endpoint yet.
+ if len(endpoint.Policies) == 0 {
+ endpoint.Policies = append(endpoint.Policies, jsonString)
+ } else {
+ endpoint.Policies[0] = jsonString
+ }
+ _, err = endpoint.Update()
+ return err
+}
+
+// ContainerAttach attaches an endpoint to container
+func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
+ operation := "ContainerAttach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+
+ requestMessage := &EndpointAttachDetachRequest{
+ ContainerID: containerID,
+ CompartmentID: compartmentID,
+ SystemType: ContainerType,
+ }
+ response := &EndpointResquestResponse{}
+ jsonString, err := json.Marshal(requestMessage)
+ if err != nil {
+ return err
+ }
+ return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
+}
+
+// ContainerDetach detaches an endpoint from container
+func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error {
+ operation := "ContainerDetach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+
+ requestMessage := &EndpointAttachDetachRequest{
+ ContainerID: containerID,
+ SystemType: ContainerType,
+ }
+ response := &EndpointResquestResponse{}
+
+ jsonString, err := json.Marshal(requestMessage)
+ if err != nil {
+ return err
+ }
+ return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
+}
+
+// HostAttach attaches a NIC on the host
+func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
+ operation := "HostAttach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+ requestMessage := &EndpointAttachDetachRequest{
+ CompartmentID: compartmentID,
+ SystemType: HostType,
+ }
+ response := &EndpointResquestResponse{}
+
+ jsonString, err := json.Marshal(requestMessage)
+ if err != nil {
+ return err
+ }
+ return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
+
+}
+
+// HostDetach detaches a NIC on the host
+func (endpoint *HNSEndpoint) HostDetach() error {
+ operation := "HostDetach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+ requestMessage := &EndpointAttachDetachRequest{
+ SystemType: HostType,
+ }
+ response := &EndpointResquestResponse{}
+
+ jsonString, err := json.Marshal(requestMessage)
+ if err != nil {
+ return err
+ }
+ return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
+}
+
+// VirtualMachineNICAttach attaches an endpoint to a virtual machine
+func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error {
+ operation := "VirtualMachineNicAttach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+ requestMessage := &EndpointAttachDetachRequest{
+ VirtualNICName: virtualMachineNICName,
+ SystemType: VirtualMachineType,
+ }
+ response := &EndpointResquestResponse{}
+
+ jsonString, err := json.Marshal(requestMessage)
+ if err != nil {
+ return err
+ }
+ return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
+}
+
+// VirtualMachineNICDetach detaches an endpoint from a virtual machine
+func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error {
+ operation := "VirtualMachineNicDetach"
+ title := "HCSShim::HNSEndpoint::" + operation
+ logrus.Debugf(title+" id=%s", endpoint.Id)
+
+ requestMessage := &EndpointAttachDetachRequest{
+ SystemType: VirtualMachineType,
+ }
+ response := &EndpointResquestResponse{}
+
+ jsonString, err := json.Marshal(requestMessage)
+ if err != nil {
+ return err
+ }
+ return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
+}
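A minimal sketch (not part of the vendored source) tying the pieces above together: create an endpoint on an existing HNS network, then hot-attach it to a running container; the endpoint name is illustrative:

package main

import "github.com/Microsoft/hcsshim"

func createAndAttach(networkID, containerID string) (*hcsshim.HNSEndpoint, error) {
	ep, err := (&hcsshim.HNSEndpoint{
		Name:           "demo-endpoint",
		VirtualNetwork: networkID,
	}).Create()
	if err != nil {
		return nil, err
	}
	if err := hcsshim.HotAttachEndpoint(containerID, ep.Id); err != nil {
		return nil, err
	}
	return ep, nil
}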
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/hnsfuncs.go
new file mode 100644
index 000000000..2c1b979ae
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsfuncs.go
@@ -0,0 +1,40 @@
+package hcsshim
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+)
+
+func hnsCall(method, path, request string, returnResponse interface{}) error {
+ var responseBuffer *uint16
+ logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)
+
+ err := _hnsCall(method, path, request, &responseBuffer)
+ if err != nil {
+ return makeError(err, "hnsCall ", "")
+ }
+ response := convertAndFreeCoTaskMemString(responseBuffer)
+
+ hnsresponse := &hnsResponse{}
+ if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
+ return err
+ }
+
+ if !hnsresponse.Success {
+ return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
+ }
+
+ if len(hnsresponse.Output) == 0 {
+ return nil
+ }
+
+ logrus.Debugf("Network Response : %s", hnsresponse.Output)
+ err = json.Unmarshal(hnsresponse.Output, returnResponse)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
new file mode 100644
index 000000000..3345bfa3f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
@@ -0,0 +1,142 @@
+package hcsshim
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+
+ "github.com/sirupsen/logrus"
+)
+
+// Subnet is associated with a network and represents a list
+// of subnets available to the network
+type Subnet struct {
+ AddressPrefix string `json:",omitempty"`
+ GatewayAddress string `json:",omitempty"`
+ Policies []json.RawMessage `json:",omitempty"`
+}
+
+// MacPool is associated with a network and represents a list
+// of MAC addresses available to the network
+type MacPool struct {
+ StartMacAddress string `json:",omitempty"`
+ EndMacAddress string `json:",omitempty"`
+}
+
+// HNSNetwork represents a network in HNS
+type HNSNetwork struct {
+ Id string `json:"ID,omitempty"`
+ Name string `json:",omitempty"`
+ Type string `json:",omitempty"`
+ NetworkAdapterName string `json:",omitempty"`
+ SourceMac string `json:",omitempty"`
+ Policies []json.RawMessage `json:",omitempty"`
+ MacPools []MacPool `json:",omitempty"`
+ Subnets []Subnet `json:",omitempty"`
+ DNSSuffix string `json:",omitempty"`
+ DNSServerList string `json:",omitempty"`
+ DNSServerCompartment uint32 `json:",omitempty"`
+ ManagementIP string `json:",omitempty"`
+ AutomaticDNS bool `json:",omitempty"`
+}
+
+type hnsNetworkResponse struct {
+ Success bool
+ Error string
+ Output HNSNetwork
+}
+
+type hnsResponse struct {
+ Success bool
+ Error string
+ Output json.RawMessage
+}
+
+// HNSNetworkRequest makes a call into HNS to update/query a single network
+func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
+ var network HNSNetwork
+ err := hnsCall(method, "/networks/"+path, request, &network)
+ if err != nil {
+ return nil, err
+ }
+
+ return &network, nil
+}
+
+// HNSListNetworkRequest makes a HNS call to query the list of available networks
+func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
+ var network []HNSNetwork
+ err := hnsCall(method, "/networks/"+path, request, &network)
+ if err != nil {
+ return nil, err
+ }
+
+ return network, nil
+}
+
+// GetHNSNetworkByID gets the network with the given ID
+func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
+ return HNSNetworkRequest("GET", networkID, "")
+}
+
+// GetHNSNetworkByName gets the network filtered by Name
+func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
+ hnsnetworks, err := HNSListNetworkRequest("GET", "", "")
+ if err != nil {
+ return nil, err
+ }
+ for _, hnsnetwork := range hnsnetworks {
+ if hnsnetwork.Name == networkName {
+ return &hnsnetwork, nil
+ }
+ }
+ return nil, fmt.Errorf("Network %v not found", networkName)
+}
+
+// Create Network by sending NetworkRequest to HNS.
+func (network *HNSNetwork) Create() (*HNSNetwork, error) {
+ operation := "Create"
+ title := "HCSShim::HNSNetwork::" + operation
+ logrus.Debugf(title+" id=%s", network.Id)
+
+ jsonString, err := json.Marshal(network)
+ if err != nil {
+ return nil, err
+ }
+ return HNSNetworkRequest("POST", "", string(jsonString))
+}
+
+// Delete Network by sending NetworkRequest to HNS
+func (network *HNSNetwork) Delete() (*HNSNetwork, error) {
+ operation := "Delete"
+ title := "HCSShim::HNSNetwork::" + operation
+ logrus.Debugf(title+" id=%s", network.Id)
+
+ return HNSNetworkRequest("DELETE", network.Id, "")
+}
+
+// NewEndpoint creates an endpoint on the Network.
+func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint {
+ return &HNSEndpoint{
+ VirtualNetwork: network.Id,
+ IPAddress: ipAddress,
+ // Render the hardware address as text; a raw []byte-to-string
+ // conversion would embed the raw bytes instead of their hex form.
+ MacAddress: macAddress.String(),
+ }
+}
+
+func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
+ operation := "CreateEndpoint"
+ title := "HCSShim::HNSNetwork::" + operation
+ logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id)
+
+ endpoint.VirtualNetwork = network.Id
+ return endpoint.Create()
+}
+
+func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
+ operation := "CreateRemoteEndpoint"
+ title := "HCSShim::HNSNetwork::" + operation
+ logrus.Debugf(title+" id=%s", network.Id)
+ endpoint.IsRemoteEndpoint = true
+ return network.CreateEndpoint(endpoint)
+}
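A minimal sketch (not part of the vendored source) of defining and creating a network; the name, the "NAT" type string and the subnet values are illustrative:

package main

import "github.com/Microsoft/hcsshim"

func createNATNetwork() (*hcsshim.HNSNetwork, error) {
	network := &hcsshim.HNSNetwork{
		Name: "demo-nat",
		Type: "NAT", // assumed HNS network type string
		Subnets: []hcsshim.Subnet{{
			AddressPrefix:  "172.20.0.0/24",
			GatewayAddress: "172.20.0.1",
		}},
	}
	return network.Create()
}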
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
new file mode 100644
index 000000000..ecfbf0eda
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
@@ -0,0 +1,95 @@
+package hcsshim
+
+// PolicyType is the type of policy supported in ModifySystem requests
+type PolicyType string
+
+// PolicyType const
+const (
+ Nat PolicyType = "NAT"
+ ACL PolicyType = "ACL"
+ PA PolicyType = "PA"
+ VLAN PolicyType = "VLAN"
+ VSID PolicyType = "VSID"
+ VNet PolicyType = "VNET"
+ L2Driver PolicyType = "L2Driver"
+ Isolation PolicyType = "Isolation"
+ QOS PolicyType = "QOS"
+ OutboundNat PolicyType = "OutBoundNAT"
+ ExternalLoadBalancer PolicyType = "ELB"
+ Route PolicyType = "ROUTE"
+)
+
+type NatPolicy struct {
+ Type PolicyType `json:"Type"`
+ Protocol string
+ InternalPort uint16
+ ExternalPort uint16
+}
+
+type QosPolicy struct {
+ Type PolicyType `json:"Type"`
+ MaximumOutgoingBandwidthInBytes uint64
+}
+
+type IsolationPolicy struct {
+ Type PolicyType `json:"Type"`
+ VLAN uint
+ VSID uint
+ InDefaultIsolation bool
+}
+
+type VlanPolicy struct {
+ Type PolicyType `json:"Type"`
+ VLAN uint
+}
+
+type VsidPolicy struct {
+ Type PolicyType `json:"Type"`
+ VSID uint
+}
+
+type PaPolicy struct {
+ Type PolicyType `json:"Type"`
+ PA string `json:"PA"`
+}
+
+type OutboundNatPolicy struct {
+ Policy
+ VIP string `json:"VIP,omitempty"`
+ Exceptions []string `json:"ExceptionList,omitempty"`
+}
+
+type ActionType string
+type DirectionType string
+type RuleType string
+
+const (
+ Allow ActionType = "Allow"
+ Block ActionType = "Block"
+
+ In DirectionType = "In"
+ Out DirectionType = "Out"
+
+ Host RuleType = "Host"
+ Switch RuleType = "Switch"
+)
+
+type ACLPolicy struct {
+ Type PolicyType `json:"Type"`
+ Protocol uint16
+ InternalPort uint16
+ Action ActionType
+ Direction DirectionType
+ LocalAddress string
+ RemoteAddress string
+ LocalPort uint16
+ RemotePort uint16
+ RuleType RuleType `json:"RuleType,omitempty"`
+
+ Priority uint16
+ ServiceName string
+}
+
+type Policy struct {
+ Type PolicyType `json:"Type"`
+}
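A minimal sketch (not part of the vendored source) of building an ACLPolicy and applying it through ApplyACLPolicy from hnsendpoint.go; protocol 6 is TCP, and the address and port values are illustrative:

package main

import "github.com/Microsoft/hcsshim"

func blockInboundHTTP(ep *hcsshim.HNSEndpoint) error {
	return ep.ApplyACLPolicy(&hcsshim.ACLPolicy{
		Type:          hcsshim.ACL,
		Action:        hcsshim.Block,
		Direction:     hcsshim.In,
		Protocol:      6, // TCP
		LocalPort:     80,
		RemoteAddress: "0.0.0.0/0",
	})
}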
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
new file mode 100644
index 000000000..15653b4f4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
@@ -0,0 +1,196 @@
+package hcsshim
+
+import (
+ "encoding/json"
+
+ "github.com/sirupsen/logrus"
+)
+
+// RoutePolicy is a structure defining schema for Route based Policy
+type RoutePolicy struct {
+ Policy
+ DestinationPrefix string `json:"DestinationPrefix,omitempty"`
+ NextHop string `json:"NextHop,omitempty"`
+ EncapEnabled bool `json:"NeedEncap,omitempty"`
+}
+
+// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
+type ELBPolicy struct {
+ LBPolicy
+ SourceVIP string `json:"SourceVIP,omitempty"`
+ VIPs []string `json:"VIPs,omitempty"`
+ ILB bool `json:"ILB,omitempty"`
+}
+
+// LBPolicy is a structure defining schema for LoadBalancing based Policy
+type LBPolicy struct {
+ Policy
+ Protocol uint16 `json:"Protocol,omitempty"`
+ InternalPort uint16
+ ExternalPort uint16
+}
+
+// PolicyList is a structure defining schema for Policy list request
+type PolicyList struct {
+ ID string `json:"ID,omitempty"`
+ EndpointReferences []string `json:"References,omitempty"`
+ Policies []json.RawMessage `json:"Policies,omitempty"`
+}
+
+// HNSPolicyListRequest makes a call into HNS to update/query a single policy list
+func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
+ var policy PolicyList
+ err := hnsCall(method, "/policylists/"+path, request, &policy)
+ if err != nil {
+ return nil, err
+ }
+
+ return &policy, nil
+}
+
+// HNSListPolicyListRequest gets all the policy lists
+func HNSListPolicyListRequest() ([]PolicyList, error) {
+ var plist []PolicyList
+ err := hnsCall("GET", "/policylists/", "", &plist)
+ if err != nil {
+ return nil, err
+ }
+
+ return plist, nil
+}
+
+// PolicyListRequest makes a HNS call to modify/query a network policy list
+func PolicyListRequest(method, path, request string) (*PolicyList, error) {
+ policylist := &PolicyList{}
+ err := hnsCall(method, "/policylists/"+path, request, &policylist)
+ if err != nil {
+ return nil, err
+ }
+
+ return policylist, nil
+}
+
+// GetPolicyListByID gets the policy list by ID
+func GetPolicyListByID(policyListID string) (*PolicyList, error) {
+ return PolicyListRequest("GET", policyListID, "")
+}
+
+// Create PolicyList by sending PolicyListRequest to HNS.
+func (policylist *PolicyList) Create() (*PolicyList, error) {
+ operation := "Create"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s", policylist.ID)
+ jsonString, err := json.Marshal(policylist)
+ if err != nil {
+ return nil, err
+ }
+ return PolicyListRequest("POST", "", string(jsonString))
+}
+
+// Delete deletes PolicyList
+func (policylist *PolicyList) Delete() (*PolicyList, error) {
+ operation := "Delete"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s", policylist.ID)
+
+ return PolicyListRequest("DELETE", policylist.ID, "")
+}
+
+// AddEndpoint adds an endpoint to a Policy List
+func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
+ operation := "AddEndpoint"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
+
+ _, err := policylist.Delete()
+ if err != nil {
+ return nil, err
+ }
+
+ // Add Endpoint to the Existing List
+ policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
+
+ return policylist.Create()
+}
+
+// RemoveEndpoint removes an endpoint from the Policy List
+func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
+ operation := "RemoveEndpoint"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
+
+ _, err := policylist.Delete()
+ if err != nil {
+ return nil, err
+ }
+
+ elementToRemove := "/endpoints/" + endpoint.Id
+
+ var references []string
+
+ for _, endpointReference := range policylist.EndpointReferences {
+ if endpointReference == elementToRemove {
+ continue
+ }
+ references = append(references, endpointReference)
+ }
+ policylist.EndpointReferences = references
+ return policylist.Create()
+}
+
+// AddLoadBalancer adds a load balancer policy list for the specified endpoints
+func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
+ operation := "AddLoadBalancer"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" Vip:%s", vip)
+
+ policylist := &PolicyList{}
+
+ elbPolicy := &ELBPolicy{
+ VIPs: []string{vip},
+ ILB: isILB,
+ }
+ elbPolicy.Type = ExternalLoadBalancer
+ elbPolicy.Protocol = protocol
+ elbPolicy.InternalPort = internalPort
+ elbPolicy.ExternalPort = externalPort
+
+ for _, endpoint := range endpoints {
+ policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
+ }
+
+ jsonString, err := json.Marshal(elbPolicy)
+ if err != nil {
+ return nil, err
+ }
+ policylist.Policies = append(policylist.Policies, jsonString)
+ return policylist.Create()
+}
+
+// AddRoute adds a route policy list for the specified endpoints
+func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
+ operation := "AddRoute"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix)
+
+ policylist := &PolicyList{}
+
+ rPolicy := &RoutePolicy{
+ DestinationPrefix: destinationPrefix,
+ NextHop: nextHop,
+ EncapEnabled: encapEnabled,
+ }
+ rPolicy.Type = Route
+
+ for _, endpoint := range endpoints {
+ policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
+ }
+
+ jsonString, err := json.Marshal(rPolicy)
+ if err != nil {
+ return nil, err
+ }
+
+ policylist.Policies = append(policylist.Policies, jsonString)
+ return policylist.Create()
+}
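A minimal sketch (not part of the vendored source) of publishing a VIP that load-balances TCP traffic across a set of endpoints; the VIP and ports are illustrative (protocol 6 is TCP, the internal port is on the endpoints, the external port on the VIP):

package main

import "github.com/Microsoft/hcsshim"

func publishVIP() (*hcsshim.PolicyList, error) {
	endpoints, err := hcsshim.HNSListEndpointRequest()
	if err != nil {
		return nil, err
	}
	return hcsshim.AddLoadBalancer(endpoints, false, "10.0.0.10", 6, 8080, 80)
}

Note that AddEndpoint and RemoveEndpoint above rebuild the policy list by deleting and recreating it, so callers should treat those operations as non-atomic.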
diff --git a/vendor/github.com/Microsoft/hcsshim/importlayer.go b/vendor/github.com/Microsoft/hcsshim/importlayer.go
new file mode 100644
index 000000000..3aed14376
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/importlayer.go
@@ -0,0 +1,212 @@
+package hcsshim
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/sirupsen/logrus"
+)
+
+// ImportLayer will take the contents of the folder at importFolderPath and import
+// that into a layer with the id layerId. Note that in order to correctly populate
+// the layer and interpret the transport format, all parent layers must already
+// be present on the system at the paths provided in parentLayerPaths.
+func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
+ title := "hcsshim::ImportLayer "
+ logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath)
+
+ // Generate layer descriptors
+ layers, err := layerPathsToDescriptors(parentLayerPaths)
+ if err != nil {
+ return err
+ }
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = importLayer(&infop, layerID, importFolderPath, layers)
+ if err != nil {
+ err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath)
+ return nil
+}
+
+// LayerWriter is an interface that supports writing a new container image layer.
+type LayerWriter interface {
+ // Add adds a file to the layer with given metadata.
+ Add(name string, fileInfo *winio.FileBasicInfo) error
+ // AddLink adds a hard link to the layer. The target must already have been added.
+ AddLink(name string, target string) error
+ // Remove removes a file that was present in a parent layer from the layer.
+ Remove(name string) error
+ // Write writes data to the current file. The data must be in the format of a Win32
+ // backup stream.
+ Write(b []byte) (int, error)
+ // Close finishes the layer writing process and releases any resources.
+ Close() error
+}
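+
+// Write-sequence sketch (illustrative only): callers typically obtain a
+// LayerWriter from NewLayerWriter below and drive it file by file. The names
+// and the backup-stream bytes here are hypothetical; real data would come
+// from a tar-to-backup-stream conversion, e.g. via winio.BackupStreamWriter.
+//
+//	w, err := hcsshim.NewLayerWriter(info, "new-layer-id", parents)
+//	if err != nil {
+//		// handle error
+//	}
+//	if err = w.Add(`Files\hello.txt`, fileInfo); err != nil { // fileInfo is a *winio.FileBasicInfo
+//		// handle error
+//	}
+//	if _, err = w.Write(backupStreamBytes); err != nil { // Win32 backup stream data
+//		// handle error
+//	}
+//	if err = w.Close(); err != nil { // the Close error must be checked
+//		// handle error
+//	}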
+
+// FilterLayerWriter provides an interface to write the contents of a layer to the file system.
+type FilterLayerWriter struct {
+ context uintptr
+}
+
+// Add adds a file or directory to the layer. The file's parent directory must have already been added.
+//
+// name contains the file's relative path. fileInfo contains file times and file attributes; the rest
+// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method.
+// winio.BackupStreamWriter can be used to facilitate this.
+func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
+ if name[0] != '\\' {
+ name = `\` + name
+ }
+ err := importLayerNext(w.context, name, fileInfo)
+ if err != nil {
+ return makeError(err, "ImportLayerNext", "")
+ }
+ return nil
+}
+
+// AddLink adds a hard link to the layer. The target of the link must have already been added.
+func (w *FilterLayerWriter) AddLink(name string, target string) error {
+ return errors.New("hard links not yet supported")
+}
+
+// Remove removes a file from the layer. The file must have been present in the parent layer.
+//
+// name contains the file's relative path.
+func (w *FilterLayerWriter) Remove(name string) error {
+ if name[0] != '\\' {
+ name = `\` + name
+ }
+ err := importLayerNext(w.context, name, nil)
+ if err != nil {
+ return makeError(err, "ImportLayerNext", "")
+ }
+ return nil
+}
+
+// Write writes more backup stream data to the current file.
+func (w *FilterLayerWriter) Write(b []byte) (int, error) {
+ err := importLayerWrite(w.context, b)
+ if err != nil {
+ err = makeError(err, "ImportLayerWrite", "")
+ return 0, err
+ }
+	return len(b), nil
+}
+
+// Close completes the layer write operation. The error must be checked to ensure that the
+// operation was successful.
+func (w *FilterLayerWriter) Close() (err error) {
+ if w.context != 0 {
+ err = importLayerEnd(w.context)
+ if err != nil {
+ err = makeError(err, "ImportLayerEnd", "")
+ }
+ w.context = 0
+ }
+ return
+}
+
+type legacyLayerWriterWrapper struct {
+ *legacyLayerWriter
+ info DriverInfo
+ layerID string
+ path string
+ parentLayerPaths []string
+}
+
+func (r *legacyLayerWriterWrapper) Close() error {
+ defer os.RemoveAll(r.root)
+ err := r.legacyLayerWriter.Close()
+ if err != nil {
+ return err
+ }
+
+ // Use the original path here because ImportLayer does not support long paths for the source in TP5.
+ // But do use a long path for the destination to work around another bug with directories
+ // with MAX_PATH - 12 < length < MAX_PATH.
+ info := r.info
+ fullPath, err := makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID))
+ if err != nil {
+ return err
+ }
+
+ info.HomeDir = ""
+ if err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths); err != nil {
+ return err
+ }
+ // Add any hard links that were collected.
+ for _, lnk := range r.PendingLinks {
+ if err = os.Remove(lnk.Path); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err = os.Link(lnk.Target, lnk.Path); err != nil {
+ return err
+ }
+ }
+ // Prepare the utility VM for use if one is present in the layer.
+ if r.HasUtilityVM {
+ err = ProcessUtilityVMImage(filepath.Join(fullPath, "UtilityVM"))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewLayerWriter returns a new layer writer for creating a layer on disk.
+// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
+// to call this and any methods on the resulting LayerWriter.
+func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
+ if len(parentLayerPaths) == 0 {
+ // This is a base layer. It gets imported differently.
+ return &baseLayerWriter{
+ root: filepath.Join(info.HomeDir, layerID),
+ }, nil
+ }
+
+ if procImportLayerBegin.Find() != nil {
+		// The new layer writer is not available on this Windows build. Fall back to the
+		// legacy import code path.
+ path, err := ioutil.TempDir("", "hcs")
+ if err != nil {
+ return nil, err
+ }
+ return &legacyLayerWriterWrapper{
+ legacyLayerWriter: newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID)),
+ info: info,
+ layerID: layerID,
+ path: path,
+ parentLayerPaths: parentLayerPaths,
+ }, nil
+ }
+ layers, err := layerPathsToDescriptors(parentLayerPaths)
+ if err != nil {
+ return nil, err
+ }
+
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ return nil, err
+ }
+
+ w := &FilterLayerWriter{}
+ err = importLayerBegin(&infop, layerID, layers, &w.context)
+ if err != nil {
+		return nil, makeError(err, "ImportLayerBegin", "")
+ }
+ return w, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go
new file mode 100644
index 000000000..9fc7852e4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/interface.go
@@ -0,0 +1,187 @@
+package hcsshim
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+)
+
+// ProcessConfig is used as both the input of Container.CreateProcess
+// and to convert the parameters to JSON for passing on to the HCS
+type ProcessConfig struct {
+ ApplicationName string `json:",omitempty"`
+ CommandLine string `json:",omitempty"`
+ CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows
+ User string `json:",omitempty"`
+ WorkingDirectory string `json:",omitempty"`
+ Environment map[string]string `json:",omitempty"`
+ EmulateConsole bool `json:",omitempty"`
+ CreateStdInPipe bool `json:",omitempty"`
+ CreateStdOutPipe bool `json:",omitempty"`
+ CreateStdErrPipe bool `json:",omitempty"`
+ ConsoleSize [2]uint `json:",omitempty"`
+ CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows
+ OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows
+}
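+
+// Marshalling sketch (illustrative only): ProcessConfig is serialized to JSON
+// before being handed to the HCS, so a minimal configuration might look like
+// the following. The command line and environment values are hypothetical.
+//
+//	cfg := &hcsshim.ProcessConfig{
+//		CommandLine:      `cmd /c echo hello`,
+//		WorkingDirectory: `C:\`,
+//		Environment:      map[string]string{"FOO": "bar"},
+//		CreateStdOutPipe: true,
+//	}
+//	b, err := json.Marshal(cfg) // fields tagged omitempty are dropped from the JSON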
+
+type Layer struct {
+ ID string
+ Path string
+}
+
+type MappedDir struct {
+ HostPath string
+ ContainerPath string
+ ReadOnly bool
+ BandwidthMaximum uint64
+ IOPSMaximum uint64
+}
+
+type MappedPipe struct {
+ HostPath string
+ ContainerPipeName string
+}
+
+type HvRuntime struct {
+ ImagePath string `json:",omitempty"`
+ SkipTemplate bool `json:",omitempty"`
+ LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
+ LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
+ LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode
+ BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD
+ WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD
+}
+
+type MappedVirtualDisk struct {
+ HostPath string `json:",omitempty"` // Path to VHD on the host
+ ContainerPath string // Platform-specific mount point path in the container
+ CreateInUtilityVM bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
+	AttachOnly        bool   `json:",omitempty"`
+}
+
+// ContainerConfig is used as both the input of CreateContainer
+// and to convert the parameters to JSON for passing on to the HCS
+type ContainerConfig struct {
+ SystemType string // HCS requires this to be hard-coded to "Container"
+ Name string // Name of the container. We use the docker ID.
+ Owner string `json:",omitempty"` // The management platform that created this container
+ VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID}
+ IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows
+ LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID
+ Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID
+ Credentials string `json:",omitempty"` // Credentials information
+ ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container.
+ ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares.
+ ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit.
+ StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS
+ StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
+ StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
+ MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes
+ HostName string `json:",omitempty"` // Hostname
+ MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts)
+ MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes
+	HvPartition                bool                // True if it is a Hyper-V Container
+ NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with.
+ EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container
+ HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
+ Servicing bool `json:",omitempty"` // True if this container is for servicing
+ AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution
+	DNSSearchList              string              `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution
+ ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
+ TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
+ MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
+}
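+
+// Configuration sketch (illustrative only): a hedged, minimal ContainerConfig
+// for a Windows Server Container. All IDs and paths are placeholders; the
+// formats follow the field comments above.
+//
+//	cfg := &hcsshim.ContainerConfig{
+//		SystemType:      "Container", // hard-coded, as required by the HCS
+//		Name:            "example-container-id",
+//		VolumePath:      `\\?\Volume{00000000-0000-0000-0000-000000000000}`,
+//		LayerFolderPath: `C:\ProgramData\windowsfilter\example-container-id`,
+//		Layers: []hcsshim.Layer{
+//			{ID: "layer-guid", Path: `C:\ProgramData\windowsfilter\layer-id`},
+//		},
+//	}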
+
+type ComputeSystemQuery struct {
+ IDs []string `json:"Ids,omitempty"`
+ Types []string `json:",omitempty"`
+ Names []string `json:",omitempty"`
+ Owners []string `json:",omitempty"`
+}
+
+// Container represents a created (but not necessarily running) container.
+type Container interface {
+ // Start synchronously starts the container.
+ Start() error
+
+ // Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds.
+ Shutdown() error
+
+ // Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
+ Terminate() error
+
+	// Wait synchronously waits for the container to shut down or terminate.
+ Wait() error
+
+	// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
+	// It returns an error if the timeout occurs.
+ WaitTimeout(time.Duration) error
+
+ // Pause pauses the execution of a container.
+ Pause() error
+
+ // Resume resumes the execution of a container.
+ Resume() error
+
+ // HasPendingUpdates returns true if the container has updates pending to install.
+ HasPendingUpdates() (bool, error)
+
+ // Statistics returns statistics for a container.
+ Statistics() (Statistics, error)
+
+ // ProcessList returns details for the processes in a container.
+ ProcessList() ([]ProcessListItem, error)
+
+ // MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller
+ MappedVirtualDisks() (map[int]MappedVirtualDiskController, error)
+
+ // CreateProcess launches a new process within the container.
+ CreateProcess(c *ProcessConfig) (Process, error)
+
+ // OpenProcess gets an interface to an existing process within the container.
+ OpenProcess(pid int) (Process, error)
+
+ // Close cleans up any state associated with the container but does not terminate or wait for it.
+ Close() error
+
+ // Modify the System
+ Modify(config *ResourceModificationRequestResponse) error
+}
+
+// Process represents a running or exited process.
+type Process interface {
+ // Pid returns the process ID of the process within the container.
+ Pid() int
+
+ // Kill signals the process to terminate but does not wait for it to finish terminating.
+ Kill() error
+
+ // Wait waits for the process to exit.
+ Wait() error
+
+	// WaitTimeout waits for the process to exit or the duration to elapse.
+	// It returns an error if the timeout occurs.
+ WaitTimeout(time.Duration) error
+
+ // ExitCode returns the exit code of the process. The process must have
+ // already terminated.
+ ExitCode() (int, error)
+
+ // ResizeConsole resizes the console of the process.
+ ResizeConsole(width, height uint16) error
+
+ // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
+ // these pipes does not close the underlying pipes; it should be possible to
+ // call this multiple times to get multiple interfaces.
+ Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
+
+ // CloseStdin closes the write side of the stdin pipe so that the process is
+ // notified on the read side that there is no more data in stdin.
+ CloseStdin() error
+
+ // Close cleans up any state associated with the process but does not kill
+ // or wait on it.
+ Close() error
+}
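+
+// Lifecycle sketch (illustrative only): given a Container value c obtained
+// from this package's container-creation API, a caller might run a process
+// and collect its exit code as follows. Error handling is abbreviated.
+//
+//	p, err := c.CreateProcess(&hcsshim.ProcessConfig{CommandLine: "cmd /c exit 2"})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer p.Close()
+//	if err = p.Wait(); err != nil {
+//		// handle error
+//	}
+//	code, err := p.ExitCode() // valid only after the process has terminated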
diff --git a/vendor/github.com/Microsoft/hcsshim/layerexists.go b/vendor/github.com/Microsoft/hcsshim/layerexists.go
new file mode 100644
index 000000000..fe46f404c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/layerexists.go
@@ -0,0 +1,30 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// LayerExists will return true if a layer with the given id exists and is known
+// to the system.
+func LayerExists(info DriverInfo, id string) (bool, error) {
+ title := "hcsshim::LayerExists "
+ logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return false, err
+ }
+
+ // Call the procedure itself.
+ var exists uint32
+
+ err = layerExists(&infop, id, &exists)
+ if err != nil {
+ err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
+ logrus.Error(err)
+ return false, err
+ }
+
+ logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists)
+ return exists != 0, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/layerutils.go b/vendor/github.com/Microsoft/hcsshim/layerutils.go
new file mode 100644
index 000000000..c0e550377
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/layerutils.go
@@ -0,0 +1,111 @@
+package hcsshim
+
+// This file contains utility functions to support storage (graph) related
+// functionality.
+
+import (
+ "path/filepath"
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+)
+
+/* To pass into syscall, we need a struct matching the following:
+enum GraphDriverType
+{
+ DiffDriver,
+ FilterDriver
+};
+
+struct DriverInfo {
+ GraphDriverType Flavour;
+ LPCWSTR HomeDir;
+};
+*/
+type DriverInfo struct {
+ Flavour int
+ HomeDir string
+}
+
+type driverInfo struct {
+ Flavour int
+ HomeDirp *uint16
+}
+
+func convertDriverInfo(info DriverInfo) (driverInfo, error) {
+ homedirp, err := syscall.UTF16PtrFromString(info.HomeDir)
+ if err != nil {
+ logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error())
+ return driverInfo{}, err
+ }
+
+ return driverInfo{
+ Flavour: info.Flavour,
+ HomeDirp: homedirp,
+ }, nil
+}
+
+/* To pass into syscall, we need a struct matching the following:
+typedef struct _WC_LAYER_DESCRIPTOR {
+
+ //
+ // The ID of the layer
+ //
+
+ GUID LayerId;
+
+ //
+ // Additional flags
+ //
+
+ union {
+ struct {
+ ULONG Reserved : 31;
+ ULONG Dirty : 1; // Created from sandbox as a result of snapshot
+ };
+ ULONG Value;
+ } Flags;
+
+ //
+ // Path to the layer root directory, null-terminated
+ //
+
+ PCWSTR Path;
+
+} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR;
+*/
+type WC_LAYER_DESCRIPTOR struct {
+ LayerId GUID
+ Flags uint32
+ Pathp *uint16
+}
+
+func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
+ // Array of descriptors that gets constructed.
+ var layers []WC_LAYER_DESCRIPTOR
+
+ for i := 0; i < len(parentLayerPaths); i++ {
+ // Create a layer descriptor, using the folder name
+ // as the source for a GUID LayerId
+ _, folderName := filepath.Split(parentLayerPaths[i])
+ g, err := NameToGuid(folderName)
+ if err != nil {
+ logrus.Debugf("Failed to convert name to guid %s", err)
+ return nil, err
+ }
+
+ p, err := syscall.UTF16PtrFromString(parentLayerPaths[i])
+ if err != nil {
+ logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err)
+ return nil, err
+ }
+
+ layers = append(layers, WC_LAYER_DESCRIPTOR{
+ LayerId: g,
+ Flags: 0,
+ Pathp: p,
+ })
+ }
+
+ return layers, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/legacy.go b/vendor/github.com/Microsoft/hcsshim/legacy.go
new file mode 100644
index 000000000..c7f6073ac
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/legacy.go
@@ -0,0 +1,741 @@
+package hcsshim
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/Microsoft/go-winio"
+)
+
+var errorIterationCanceled = errors.New("")
+
+var mutatedUtilityVMFiles = map[string]bool{
+ `EFI\Microsoft\Boot\BCD`: true,
+ `EFI\Microsoft\Boot\BCD.LOG`: true,
+ `EFI\Microsoft\Boot\BCD.LOG1`: true,
+ `EFI\Microsoft\Boot\BCD.LOG2`: true,
+}
+
+const (
+ filesPath = `Files`
+ hivesPath = `Hives`
+ utilityVMPath = `UtilityVM`
+ utilityVMFilesPath = `UtilityVM\Files`
+)
+
+func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
+ return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition)
+}
+
+func makeLongAbsPath(path string) (string, error) {
+ if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
+ return path, nil
+ }
+ if !filepath.IsAbs(path) {
+ absPath, err := filepath.Abs(path)
+ if err != nil {
+ return "", err
+ }
+ path = absPath
+ }
+ if strings.HasPrefix(path, `\\`) {
+ return `\\?\UNC\` + path[2:], nil
+ }
+ return `\\?\` + path, nil
+}
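+
+// Behaviour sketch (derived from the code above): relative paths are made
+// absolute first, then a long-path prefix is applied unless one is already
+// present. For example:
+//
+//	makeLongAbsPath(`C:\layers\foo`)      // -> `\\?\C:\layers\foo`
+//	makeLongAbsPath(`\\server\share\foo`) // -> `\\?\UNC\server\share\foo`
+//	makeLongAbsPath(`\\?\C:\already`)     // -> `\\?\C:\already` (unchanged)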
+
+func hasPathPrefix(p, prefix string) bool {
+ return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\'
+}
+
+type fileEntry struct {
+ path string
+ fi os.FileInfo
+ err error
+}
+
+type legacyLayerReader struct {
+ root string
+ result chan *fileEntry
+ proceed chan bool
+ currentFile *os.File
+ backupReader *winio.BackupFileReader
+}
+
+// newLegacyLayerReader returns a new LayerReader that can read the Windows
+// container layer transport format from disk.
+func newLegacyLayerReader(root string) *legacyLayerReader {
+ r := &legacyLayerReader{
+ root: root,
+ result: make(chan *fileEntry),
+ proceed: make(chan bool),
+ }
+ go r.walk()
+ return r
+}
+
+func readTombstones(path string) (map[string][]string, error) {
+ tf, err := os.Open(filepath.Join(path, "tombstones.txt"))
+ if err != nil {
+ return nil, err
+ }
+ defer tf.Close()
+ s := bufio.NewScanner(tf)
+ if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
+ return nil, errors.New("Invalid tombstones file")
+ }
+
+	ts := make(map[string][]string)
+ for s.Scan() {
+ t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\`
+ dir := filepath.Dir(t)
+ ts[dir] = append(ts[dir], t)
+ }
+ if err = s.Err(); err != nil {
+ return nil, err
+ }
+
+ return ts, nil
+}
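+
+// Format sketch (derived from the parser above): a tombstones.txt file starts
+// with a UTF-8 BOM and a version header, followed by one root-relative path
+// per line; each path names a file deleted relative to the parent layer.
+//
+//	Version 1.0
+//	\dir\removed-file.txt
+//	\dir\sub\another.txt
+//
+// For the hypothetical input above, the returned map would contain
+// ts[`Files\dir`] = [`Files\dir\removed-file.txt`] and
+// ts[`Files\dir\sub`] = [`Files\dir\sub\another.txt`].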
+
+func (r *legacyLayerReader) walkUntilCancelled() error {
+ root, err := makeLongAbsPath(r.root)
+ if err != nil {
+ return err
+ }
+
+ r.root = root
+ ts, err := readTombstones(r.root)
+ if err != nil {
+ return err
+ }
+
+ err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") {
+ return nil
+ }
+
+ r.result <- &fileEntry{path, info, nil}
+ if !<-r.proceed {
+ return errorIterationCanceled
+ }
+
+ // List all the tombstones.
+ if info.IsDir() {
+ relPath, err := filepath.Rel(r.root, path)
+ if err != nil {
+ return err
+ }
+ if dts, ok := ts[relPath]; ok {
+ for _, t := range dts {
+ r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil}
+ if !<-r.proceed {
+ return errorIterationCanceled
+ }
+ }
+ }
+ }
+ return nil
+ })
+ if err == errorIterationCanceled {
+ return nil
+ }
+ if err == nil {
+ return io.EOF
+ }
+ return err
+}
+
+func (r *legacyLayerReader) walk() {
+ defer close(r.result)
+ if !<-r.proceed {
+ return
+ }
+
+ err := r.walkUntilCancelled()
+ if err != nil {
+ for {
+ r.result <- &fileEntry{err: err}
+ if !<-r.proceed {
+ return
+ }
+ }
+ }
+}
+
+func (r *legacyLayerReader) reset() {
+ if r.backupReader != nil {
+ r.backupReader.Close()
+ r.backupReader = nil
+ }
+ if r.currentFile != nil {
+ r.currentFile.Close()
+ r.currentFile = nil
+ }
+}
+
+func findBackupStreamSize(r io.Reader) (int64, error) {
+ br := winio.NewBackupStreamReader(r)
+ for {
+ hdr, err := br.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return 0, err
+ }
+ if hdr.Id == winio.BackupData {
+ return hdr.Size, nil
+ }
+ }
+}
+
+func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
+ r.reset()
+ r.proceed <- true
+ fe := <-r.result
+ if fe == nil {
+ err = errors.New("LegacyLayerReader closed")
+ return
+ }
+ if fe.err != nil {
+ err = fe.err
+ return
+ }
+
+ path, err = filepath.Rel(r.root, fe.path)
+ if err != nil {
+ return
+ }
+
+ if fe.fi == nil {
+ // This is a tombstone. Return a nil fileInfo.
+ return
+ }
+
+ if fe.fi.IsDir() && hasPathPrefix(path, filesPath) {
+ fe.path += ".$wcidirs$"
+ }
+
+ f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ }()
+
+ fileInfo, err = winio.GetFileBasicInfo(f)
+ if err != nil {
+ return
+ }
+
+ if !hasPathPrefix(path, filesPath) {
+ size = fe.fi.Size()
+ r.backupReader = winio.NewBackupFileReader(f, false)
+ if path == hivesPath || path == filesPath {
+ // The Hives directory has a non-deterministic file time because of the
+ // nature of the import process. Use the times from System_Delta.
+ var g *os.File
+ g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`))
+ if err != nil {
+ return
+ }
+ attr := fileInfo.FileAttributes
+ fileInfo, err = winio.GetFileBasicInfo(g)
+ g.Close()
+ if err != nil {
+ return
+ }
+ fileInfo.FileAttributes = attr
+ }
+
+ // The creation time and access time get reset for files outside of the Files path.
+ fileInfo.CreationTime = fileInfo.LastWriteTime
+ fileInfo.LastAccessTime = fileInfo.LastWriteTime
+
+ } else {
+ // The file attributes are written before the backup stream.
+ var attr uint32
+ err = binary.Read(f, binary.LittleEndian, &attr)
+ if err != nil {
+ return
+ }
+ fileInfo.FileAttributes = uintptr(attr)
+ beginning := int64(4)
+
+ // Find the accurate file size.
+ if !fe.fi.IsDir() {
+ size, err = findBackupStreamSize(f)
+ if err != nil {
+ err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
+ return
+ }
+ }
+
+		// Seek back to the beginning of the backup stream.
+ _, err = f.Seek(beginning, 0)
+ if err != nil {
+ return
+ }
+ }
+
+ r.currentFile = f
+ f = nil
+ return
+}
+
+func (r *legacyLayerReader) Read(b []byte) (int, error) {
+ if r.backupReader == nil {
+ if r.currentFile == nil {
+ return 0, io.EOF
+ }
+ return r.currentFile.Read(b)
+ }
+ return r.backupReader.Read(b)
+}
+
+func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) {
+ if r.backupReader == nil {
+ if r.currentFile == nil {
+ return 0, errors.New("no current file")
+ }
+ return r.currentFile.Seek(offset, whence)
+ }
+ return 0, errors.New("seek not supported on this stream")
+}
+
+func (r *legacyLayerReader) Close() error {
+ r.proceed <- false
+ <-r.result
+ r.reset()
+ return nil
+}
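+
+// Iteration sketch (illustrative only): the reader walks the layer on a
+// background goroutine and hands entries over one at a time. A package-internal
+// consumer loop might look like the following; a nil fileInfo marks a tombstone.
+//
+//	r := newLegacyLayerReader(`C:\layer`) // hypothetical layer path
+//	for {
+//		name, size, fi, err := r.Next()
+//		if err == io.EOF {
+//			break // walk finished
+//		}
+//		if err != nil {
+//			// handle error
+//		}
+//		if fi == nil {
+//			continue // tombstone entry: name records a deletion
+//		}
+//		_ = name
+//		_ = size // stream the file contents via r.Read
+//	}
+//	r.Close()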
+
+type pendingLink struct {
+ Path, Target string
+}
+
+type legacyLayerWriter struct {
+ root string
+ parentRoots []string
+ destRoot string
+ currentFile *os.File
+ backupWriter *winio.BackupFileWriter
+ tombstones []string
+ pathFixed bool
+ HasUtilityVM bool
+ uvmDi []dirInfo
+ addedFiles map[string]bool
+ PendingLinks []pendingLink
+}
+
+// newLegacyLayerWriter returns a LayerWriter that can write the container layer
+// transport format to disk.
+func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) *legacyLayerWriter {
+ return &legacyLayerWriter{
+ root: root,
+ parentRoots: parentRoots,
+ destRoot: destRoot,
+ addedFiles: make(map[string]bool),
+ }
+}
+
+func (w *legacyLayerWriter) init() error {
+ if !w.pathFixed {
+ path, err := makeLongAbsPath(w.root)
+ if err != nil {
+ return err
+ }
+ for i, p := range w.parentRoots {
+ w.parentRoots[i], err = makeLongAbsPath(p)
+ if err != nil {
+ return err
+ }
+ }
+ destPath, err := makeLongAbsPath(w.destRoot)
+ if err != nil {
+ return err
+ }
+ w.root = path
+ w.destRoot = destPath
+ w.pathFixed = true
+ }
+ return nil
+}
+
+func (w *legacyLayerWriter) initUtilityVM() error {
+ if !w.HasUtilityVM {
+ err := os.Mkdir(filepath.Join(w.destRoot, utilityVMPath), 0)
+ if err != nil {
+ return err
+ }
+ // Server 2016 does not support multiple layers for the utility VM, so
+ // clone the utility VM from the parent layer into this layer. Use hard
+ // links to avoid unnecessary copying, since most of the files are
+ // immutable.
+ err = cloneTree(filepath.Join(w.parentRoots[0], utilityVMFilesPath), filepath.Join(w.destRoot, utilityVMFilesPath), mutatedUtilityVMFiles)
+ if err != nil {
+ return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
+ }
+ w.HasUtilityVM = true
+ }
+ return nil
+}
+
+func (w *legacyLayerWriter) reset() {
+ if w.backupWriter != nil {
+ w.backupWriter.Close()
+ w.backupWriter = nil
+ }
+ if w.currentFile != nil {
+ w.currentFile.Close()
+ w.currentFile = nil
+ }
+}
+
+// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
+func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
+ createDisposition := uint32(syscall.CREATE_NEW)
+ if isDir {
+ err = os.Mkdir(destPath, 0)
+ if err != nil {
+ return nil, err
+ }
+ createDisposition = syscall.OPEN_EXISTING
+ }
+
+ src, err := openFileOrDir(srcPath, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.OPEN_EXISTING)
+ if err != nil {
+ return nil, err
+ }
+ defer src.Close()
+ srcr := winio.NewBackupFileReader(src, true)
+ defer srcr.Close()
+
+ fileInfo, err = winio.GetFileBasicInfo(src)
+ if err != nil {
+ return nil, err
+ }
+
+ dest, err := openFileOrDir(destPath, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition)
+ if err != nil {
+ return nil, err
+ }
+ defer dest.Close()
+
+ err = winio.SetFileBasicInfo(dest, fileInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ destw := winio.NewBackupFileWriter(dest, true)
+ defer func() {
+ cerr := destw.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ _, err = io.Copy(destw, srcr)
+ if err != nil {
+ return nil, err
+ }
+
+ return fileInfo, nil
+}
+
+// cloneTree clones a directory tree using hard links. It skips hard links for
+// the file names in the provided map and just copies those files.
+func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error {
+ var di []dirInfo
+ err := filepath.Walk(srcPath, func(srcFilePath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ relPath, err := filepath.Rel(srcPath, srcFilePath)
+ if err != nil {
+ return err
+ }
+ destFilePath := filepath.Join(destPath, relPath)
+
+ // Directories, reparse points, and files that will be mutated during
+ // utility VM import must be copied. All other files can be hard linked.
+ isReparsePoint := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0
+ if info.IsDir() || isReparsePoint || mutatedFiles[relPath] {
+ fi, err := copyFileWithMetadata(srcFilePath, destFilePath, info.IsDir())
+ if err != nil {
+ return err
+ }
+ if info.IsDir() && !isReparsePoint {
+ di = append(di, dirInfo{path: destFilePath, fileInfo: *fi})
+ }
+ } else {
+ err = os.Link(srcFilePath, destFilePath)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Don't recurse on reparse points.
+ if info.IsDir() && isReparsePoint {
+ return filepath.SkipDir
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return reapplyDirectoryTimes(di)
+}
+
+func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
+ w.reset()
+ err := w.init()
+ if err != nil {
+ return err
+ }
+
+ if name == utilityVMPath {
+ return w.initUtilityVM()
+ }
+
+ if hasPathPrefix(name, utilityVMPath) {
+ if !w.HasUtilityVM {
+ return errors.New("missing UtilityVM directory")
+ }
+ if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
+ return errors.New("invalid UtilityVM layer")
+ }
+ path := filepath.Join(w.destRoot, name)
+ createDisposition := uint32(syscall.OPEN_EXISTING)
+ if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+ st, err := os.Lstat(path)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if st != nil {
+ // Delete the existing file/directory if it is not the same type as this directory.
+ existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes
+ if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
+ if err = os.RemoveAll(path); err != nil {
+ return err
+ }
+ st = nil
+ }
+ }
+ if st == nil {
+ if err = os.Mkdir(path, 0); err != nil {
+ return err
+ }
+ }
+ if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
+ w.uvmDi = append(w.uvmDi, dirInfo{path: path, fileInfo: *fileInfo})
+ }
+ } else {
+ // Overwrite any existing hard link.
+ err = os.Remove(path)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ createDisposition = syscall.CREATE_NEW
+ }
+
+ f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if f != nil {
+ f.Close()
+ os.Remove(path)
+ }
+ }()
+
+ err = winio.SetFileBasicInfo(f, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.backupWriter = winio.NewBackupFileWriter(f, true)
+ w.currentFile = f
+ w.addedFiles[name] = true
+ f = nil
+ return nil
+ }
+
+ path := filepath.Join(w.root, name)
+ if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+ err := os.Mkdir(path, 0)
+ if err != nil {
+ return err
+ }
+ path += ".$wcidirs$"
+ }
+
+ f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.CREATE_NEW)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if f != nil {
+ f.Close()
+ os.Remove(path)
+ }
+ }()
+
+ strippedFi := *fileInfo
+ strippedFi.FileAttributes = 0
+ err = winio.SetFileBasicInfo(f, &strippedFi)
+ if err != nil {
+ return err
+ }
+
+ if hasPathPrefix(name, hivesPath) {
+ w.backupWriter = winio.NewBackupFileWriter(f, false)
+ } else {
+ // The file attributes are written before the stream.
+ err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes))
+ if err != nil {
+ return err
+ }
+ }
+
+ w.currentFile = f
+ w.addedFiles[name] = true
+ f = nil
+ return nil
+}
+
+func (w *legacyLayerWriter) AddLink(name string, target string) error {
+ w.reset()
+ err := w.init()
+ if err != nil {
+ return err
+ }
+
+ var roots []string
+ if hasPathPrefix(target, filesPath) {
+ // Look for cross-layer hard link targets in the parent layers, since
+ // nothing is in the destination path yet.
+ roots = w.parentRoots
+ } else if hasPathPrefix(target, utilityVMFilesPath) {
+ // Since the utility VM is fully cloned into the destination path
+ // already, look for cross-layer hard link targets directly in the
+ // destination path.
+ roots = []string{w.destRoot}
+ }
+
+ if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
+ return errors.New("invalid hard link in layer")
+ }
+
+	// First, try to find the link target among previously added files. If that
+	// fails, search in the parent layers.
+ var selectedRoot string
+ if _, ok := w.addedFiles[target]; ok {
+ selectedRoot = w.destRoot
+ } else {
+ for _, r := range roots {
+ if _, err = os.Lstat(filepath.Join(r, target)); err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ } else {
+ selectedRoot = r
+ break
+ }
+ }
+ if selectedRoot == "" {
+ return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
+ }
+ }
+ // The link can't be written until after the ImportLayer call.
+ w.PendingLinks = append(w.PendingLinks, pendingLink{
+ Path: filepath.Join(w.destRoot, name),
+ Target: filepath.Join(selectedRoot, target),
+ })
+ w.addedFiles[name] = true
+ return nil
+}
+
+func (w *legacyLayerWriter) Remove(name string) error {
+ if hasPathPrefix(name, filesPath) {
+ w.tombstones = append(w.tombstones, name[len(filesPath)+1:])
+ } else if hasPathPrefix(name, utilityVMFilesPath) {
+ err := w.initUtilityVM()
+ if err != nil {
+ return err
+ }
+ // Make sure the path exists; os.RemoveAll will not fail if the file is
+		// already gone, and this needs to be a fatal error for diagnostic
+		// purposes.
+ path := filepath.Join(w.destRoot, name)
+ if _, err := os.Lstat(path); err != nil {
+ return err
+ }
+ err = os.RemoveAll(path)
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("invalid tombstone %s", name)
+ }
+
+ return nil
+}
+
+func (w *legacyLayerWriter) Write(b []byte) (int, error) {
+ if w.backupWriter == nil {
+ if w.currentFile == nil {
+ return 0, errors.New("closed")
+ }
+ return w.currentFile.Write(b)
+ }
+ return w.backupWriter.Write(b)
+}
+
+func (w *legacyLayerWriter) Close() error {
+ w.reset()
+ err := w.init()
+ if err != nil {
+ return err
+ }
+ tf, err := os.Create(filepath.Join(w.root, "tombstones.txt"))
+ if err != nil {
+ return err
+ }
+ defer tf.Close()
+ _, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n"))
+ if err != nil {
+ return err
+ }
+ for _, t := range w.tombstones {
+ _, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n"))
+ if err != nil {
+ return err
+ }
+ }
+ if w.HasUtilityVM {
+ err = reapplyDirectoryTimes(w.uvmDi)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/nametoguid.go
new file mode 100644
index 000000000..b7c6d020c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/nametoguid.go
@@ -0,0 +1,20 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// NameToGuid converts the given string into a GUID using the algorithm in the
+// Host Compute Service, ensuring GUIDs generated with the same string are common
+// across all clients.
+func NameToGuid(name string) (id GUID, err error) {
+ title := "hcsshim::NameToGuid "
+ logrus.Debugf(title+"Name %s", name)
+
+ err = nameToGuid(name, &id)
+ if err != nil {
+ err = makeErrorf(err, title, "name=%s", name)
+ logrus.Error(err)
+ return
+ }
+
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/preparelayer.go
new file mode 100644
index 000000000..5c5b61841
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/preparelayer.go
@@ -0,0 +1,46 @@
+package hcsshim
+
+import (
+ "sync"
+
+ "github.com/sirupsen/logrus"
+)
+
+var prepareLayerLock sync.Mutex
+
+// PrepareLayer finds a mounted read-write layer matching layerId and enables the
+// filesystem filter for use on that layer. This requires the paths to all
+// parent layers, and is necessary in order to view or interact with the layer
+// as an actual filesystem (reading and writing files, creating directories, etc).
+// Disabling the filter must be done via UnprepareLayer.
+func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
+ title := "hcsshim::PrepareLayer "
+ logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
+
+ // Generate layer descriptors
+ layers, err := layerPathsToDescriptors(parentLayerPaths)
+ if err != nil {
+ return err
+ }
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ // This lock is a temporary workaround for a Windows bug. Only allowing one
+ // call to prepareLayer at a time vastly reduces the chance of a timeout.
+ prepareLayerLock.Lock()
+ defer prepareLayerLock.Unlock()
+ err = prepareLayer(&infop, layerId, layers)
+ if err != nil {
+ err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId)
+ return nil
+}
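+
+// Pairing sketch (illustrative only): PrepareLayer and UnprepareLayer bracket
+// any filesystem access to a read-write layer. The identifiers are assumed.
+//
+//	if err := hcsshim.PrepareLayer(info, "layer-id", parentPaths); err != nil {
+//		// handle error
+//	}
+//	defer hcsshim.UnprepareLayer(info, "layer-id")
+//	// ... read and write files within the prepared layer ...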
diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go
new file mode 100644
index 000000000..faee2cfee
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/process.go
@@ -0,0 +1,384 @@
+package hcsshim
+
+import (
+ "encoding/json"
+ "io"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+// process represents a single process within a container, backed by an HCS process handle.
+type process struct {
+ handleLock sync.RWMutex
+ handle hcsProcess
+ processID int
+ container *container
+ cachedPipes *cachedPipes
+ callbackNumber uintptr
+}
+
+type cachedPipes struct {
+ stdIn syscall.Handle
+ stdOut syscall.Handle
+ stdErr syscall.Handle
+}
+
+type processModifyRequest struct {
+ Operation string
+ ConsoleSize *consoleSize `json:",omitempty"`
+ CloseHandle *closeHandle `json:",omitempty"`
+}
+
+type consoleSize struct {
+ Height uint16
+ Width uint16
+}
+
+type closeHandle struct {
+ Handle string
+}
+
+type processStatus struct {
+ ProcessID uint32
+ Exited bool
+ ExitCode uint32
+ LastWaitResult int32
+}
+
+const (
+ stdIn string = "StdIn"
+ stdOut string = "StdOut"
+ stdErr string = "StdErr"
+)
+
+const (
+ modifyConsoleSize string = "ConsoleSize"
+ modifyCloseHandle string = "CloseHandle"
+)
+
+// Pid returns the process ID of the process within the container.
+func (process *process) Pid() int {
+ return process.processID
+}
+
+// Kill signals the process to terminate but does not wait for it to finish terminating.
+func (process *process) Kill() error {
+ process.handleLock.RLock()
+ defer process.handleLock.RUnlock()
+ operation := "Kill"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ if process.handle == 0 {
+ return makeProcessError(process, operation, "", ErrAlreadyClosed)
+ }
+
+ var resultp *uint16
+ err := hcsTerminateProcess(process.handle, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return makeProcessError(process, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d", process.processID)
+ return nil
+}
+
+// Wait waits for the process to exit.
+func (process *process) Wait() error {
+ operation := "Wait"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
+ if err != nil {
+ return makeProcessError(process, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d", process.processID)
+ return nil
+}
+
+// WaitTimeout waits for the process to exit or the duration to elapse. It returns
+// an error if the timeout occurs.
+func (process *process) WaitTimeout(timeout time.Duration) error {
+ operation := "WaitTimeout"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
+ if err != nil {
+ return makeProcessError(process, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d", process.processID)
+ return nil
+}
+
+// ExitCode returns the exit code of the process. The process must have
+// already terminated.
+func (process *process) ExitCode() (int, error) {
+ process.handleLock.RLock()
+ defer process.handleLock.RUnlock()
+ operation := "ExitCode"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ if process.handle == 0 {
+ return 0, makeProcessError(process, operation, "", ErrAlreadyClosed)
+ }
+
+ properties, err := process.properties()
+ if err != nil {
+ return 0, makeProcessError(process, operation, "", err)
+ }
+
+	if !properties.Exited {
+ return 0, makeProcessError(process, operation, "", ErrInvalidProcessState)
+ }
+
+ if properties.LastWaitResult != 0 {
+ return 0, makeProcessError(process, operation, "", syscall.Errno(properties.LastWaitResult))
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d exitCode=%d", process.processID, properties.ExitCode)
+ return int(properties.ExitCode), nil
+}
+
+// ResizeConsole resizes the console of the process.
+func (process *process) ResizeConsole(width, height uint16) error {
+ process.handleLock.RLock()
+ defer process.handleLock.RUnlock()
+ operation := "ResizeConsole"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ if process.handle == 0 {
+ return makeProcessError(process, operation, "", ErrAlreadyClosed)
+ }
+
+ modifyRequest := processModifyRequest{
+ Operation: modifyConsoleSize,
+ ConsoleSize: &consoleSize{
+ Height: height,
+ Width: width,
+ },
+ }
+
+ modifyRequestb, err := json.Marshal(modifyRequest)
+ if err != nil {
+ return err
+ }
+
+ modifyRequestStr := string(modifyRequestb)
+
+ var resultp *uint16
+ err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return makeProcessError(process, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d", process.processID)
+ return nil
+}
+
+func (process *process) properties() (*processStatus, error) {
+ operation := "properties"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ var (
+ resultp *uint16
+ propertiesp *uint16
+ )
+ err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return nil, err
+ }
+
+ if propertiesp == nil {
+ return nil, ErrUnexpectedValue
+ }
+ propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
+
+ properties := &processStatus{}
+ if err := json.Unmarshal(propertiesRaw, properties); err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
+ return properties, nil
+}
+
+// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
+// these pipes does not close the underlying pipes; it should be possible to
+// call this multiple times to get multiple interfaces.
+func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
+ process.handleLock.RLock()
+ defer process.handleLock.RUnlock()
+ operation := "Stdio"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ if process.handle == 0 {
+ return nil, nil, nil, makeProcessError(process, operation, "", ErrAlreadyClosed)
+ }
+
+ var stdIn, stdOut, stdErr syscall.Handle
+
+ if process.cachedPipes == nil {
+ var (
+ processInfo hcsProcessInformation
+ resultp *uint16
+ )
+ err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return nil, nil, nil, makeProcessError(process, operation, "", err)
+ }
+
+ stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
+ } else {
+ // Use cached pipes
+ stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
+
+ // Invalidate the cache
+ process.cachedPipes = nil
+ }
+
+ pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
+ if err != nil {
+ return nil, nil, nil, makeProcessError(process, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d", process.processID)
+ return pipes[0], pipes[1], pipes[2], nil
+}
+
+// CloseStdin closes the write side of the stdin pipe so that the process is
+// notified on the read side that there is no more data in stdin.
+func (process *process) CloseStdin() error {
+ process.handleLock.RLock()
+ defer process.handleLock.RUnlock()
+ operation := "CloseStdin"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ if process.handle == 0 {
+ return makeProcessError(process, operation, "", ErrAlreadyClosed)
+ }
+
+ modifyRequest := processModifyRequest{
+ Operation: modifyCloseHandle,
+ CloseHandle: &closeHandle{
+ Handle: stdIn,
+ },
+ }
+
+ modifyRequestb, err := json.Marshal(modifyRequest)
+ if err != nil {
+ return err
+ }
+
+ modifyRequestStr := string(modifyRequestb)
+
+ var resultp *uint16
+ err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
+ err = processHcsResult(err, resultp)
+ if err != nil {
+ return makeProcessError(process, operation, "", err)
+ }
+
+ logrus.Debugf(title+" succeeded processid=%d", process.processID)
+ return nil
+}
+
+// Close cleans up any state associated with the process but does not kill
+// or wait on it.
+func (process *process) Close() error {
+ process.handleLock.Lock()
+ defer process.handleLock.Unlock()
+ operation := "Close"
+ title := "HCSShim::Process::" + operation
+ logrus.Debugf(title+" processid=%d", process.processID)
+
+ // Don't double free this
+ if process.handle == 0 {
+ return nil
+ }
+
+ if err := process.unregisterCallback(); err != nil {
+ return makeProcessError(process, operation, "", err)
+ }
+
+ if err := hcsCloseProcess(process.handle); err != nil {
+ return makeProcessError(process, operation, "", err)
+ }
+
+ process.handle = 0
+
+ logrus.Debugf(title+" succeeded processid=%d", process.processID)
+ return nil
+}
+
+func (process *process) registerCallback() error {
+ context := &notifcationWatcherContext{
+ channels: newChannels(),
+ }
+
+ callbackMapLock.Lock()
+ callbackNumber := nextCallback
+ nextCallback++
+ callbackMap[callbackNumber] = context
+ callbackMapLock.Unlock()
+
+ var callbackHandle hcsCallback
+ err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
+ if err != nil {
+ return err
+ }
+ context.handle = callbackHandle
+ process.callbackNumber = callbackNumber
+
+ return nil
+}
+
+func (process *process) unregisterCallback() error {
+ callbackNumber := process.callbackNumber
+
+ callbackMapLock.RLock()
+ context := callbackMap[callbackNumber]
+ callbackMapLock.RUnlock()
+
+ if context == nil {
+ return nil
+ }
+
+ handle := context.handle
+
+ if handle == 0 {
+ return nil
+ }
+
+	// hcsUnregisterProcessCallback has its own synchronization
+ // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
+ err := hcsUnregisterProcessCallback(handle)
+ if err != nil {
+ return err
+ }
+
+ closeChannels(context.channels)
+
+ callbackMapLock.Lock()
+ callbackMap[callbackNumber] = nil
+ callbackMapLock.Unlock()
+
+ handle = 0
+
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/processimage.go b/vendor/github.com/Microsoft/hcsshim/processimage.go
new file mode 100644
index 000000000..fadb1b92c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/processimage.go
@@ -0,0 +1,23 @@
+package hcsshim
+
+import "os"
+
+// ProcessBaseLayer post-processes a base layer that has had its files extracted.
+// The files should have been extracted to <path>\Files.
+func ProcessBaseLayer(path string) error {
+ err := processBaseImage(path)
+ if err != nil {
+ return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err}
+ }
+ return nil
+}
+
+// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted.
+// The files should have been extracted to <path>\Files.
+func ProcessUtilityVMImage(path string) error {
+ err := processUtilityImage(path)
+ if err != nil {
+ return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err}
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/unpreparelayer.go
new file mode 100644
index 000000000..e8a3b507b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/unpreparelayer.go
@@ -0,0 +1,27 @@
+package hcsshim
+
+import "github.com/sirupsen/logrus"
+
+// UnprepareLayer disables the filesystem filter for the read-write layer with
+// the given id.
+func UnprepareLayer(info DriverInfo, layerId string) error {
+ title := "hcsshim::UnprepareLayer "
+ logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
+
+ // Convert info to API calling convention
+ infop, err := convertDriverInfo(info)
+ if err != nil {
+ logrus.Error(err)
+ return err
+ }
+
+ err = unprepareLayer(&infop, layerId)
+ if err != nil {
+ err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
+ logrus.Error(err)
+ return err
+ }
+
+ logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId)
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/utils.go b/vendor/github.com/Microsoft/hcsshim/utils.go
new file mode 100644
index 000000000..bd6e2d94a
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/utils.go
@@ -0,0 +1,33 @@
+package hcsshim
+
+import (
+ "io"
+ "syscall"
+
+ "github.com/Microsoft/go-winio"
+)
+
+// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
+// if there is an error.
+func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
+ fs := make([]io.ReadWriteCloser, len(hs))
+ for i, h := range hs {
+ if h != syscall.Handle(0) {
+ if err == nil {
+ fs[i], err = winio.MakeOpenFile(h)
+ }
+ if err != nil {
+ syscall.Close(h)
+ }
+ }
+ }
+ if err != nil {
+ for _, f := range fs {
+ if f != nil {
+ f.Close()
+ }
+ }
+ return nil, err
+ }
+ return fs, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/version.go b/vendor/github.com/Microsoft/hcsshim/version.go
new file mode 100644
index 000000000..ae10c23d4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/version.go
@@ -0,0 +1,7 @@
+package hcsshim
+
+// IsTP4 returns whether the currently running Windows build is TP4.
+func IsTP4() bool {
+ // HNSCall was not present in TP4
+ return procHNSCall.Find() != nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/waithelper.go b/vendor/github.com/Microsoft/hcsshim/waithelper.go
new file mode 100644
index 000000000..b7be20ea0
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/waithelper.go
@@ -0,0 +1,63 @@
+package hcsshim
+
+import (
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
+ err = processHcsResult(err, resultp)
+ if IsPending(err) {
+ return waitForNotification(callbackNumber, expectedNotification, timeout)
+ }
+
+ return err
+}
+
+func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
+ callbackMapLock.RLock()
+ channels := callbackMap[callbackNumber].channels
+ callbackMapLock.RUnlock()
+
+ expectedChannel := channels[expectedNotification]
+ if expectedChannel == nil {
+ logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification)
+ return ErrInvalidNotificationType
+ }
+
+ var c <-chan time.Time
+ if timeout != nil {
+ timer := time.NewTimer(*timeout)
+ c = timer.C
+ defer timer.Stop()
+ }
+
+ select {
+ case err, ok := <-expectedChannel:
+ if !ok {
+ return ErrHandleClose
+ }
+ return err
+ case err, ok := <-channels[hcsNotificationSystemExited]:
+ if !ok {
+ return ErrHandleClose
+ }
+		// If the expected notification is hcsNotificationSystemExited, which of the two select
+		// cases fires is random, so return the raw error when hcsNotificationSystemExited is expected.
+ if channels[hcsNotificationSystemExited] == expectedChannel {
+ return err
+ }
+ return ErrUnexpectedContainerExit
+ case _, ok := <-channels[hcsNotificationServiceDisconnect]:
+ if !ok {
+ return ErrHandleClose
+ }
+ // hcsNotificationServiceDisconnect should never be an expected notification
+ // it does not need the same handling as hcsNotificationSystemExited
+ return ErrUnexpectedProcessAbort
+ case <-c:
+ return ErrTimeout
+ }
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/zhcsshim.go b/vendor/github.com/Microsoft/hcsshim/zhcsshim.go
new file mode 100644
index 000000000..5d1a851ae
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/zhcsshim.go
@@ -0,0 +1,1042 @@
+// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+
+package hcsshim
+
+import (
+ "syscall"
+ "unsafe"
+
+ "github.com/Microsoft/go-winio"
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+// error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modole32 = windows.NewLazySystemDLL("ole32.dll")
+ modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
+ modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
+
+ procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
+ procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId")
+ procActivateLayer = modvmcompute.NewProc("ActivateLayer")
+ procCopyLayer = modvmcompute.NewProc("CopyLayer")
+ procCreateLayer = modvmcompute.NewProc("CreateLayer")
+ procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer")
+ procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize")
+ procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer")
+ procDestroyLayer = modvmcompute.NewProc("DestroyLayer")
+ procExportLayer = modvmcompute.NewProc("ExportLayer")
+ procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath")
+ procGetBaseImages = modvmcompute.NewProc("GetBaseImages")
+ procImportLayer = modvmcompute.NewProc("ImportLayer")
+ procLayerExists = modvmcompute.NewProc("LayerExists")
+ procNameToGuid = modvmcompute.NewProc("NameToGuid")
+ procPrepareLayer = modvmcompute.NewProc("PrepareLayer")
+ procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer")
+ procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage")
+ procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage")
+ procImportLayerBegin = modvmcompute.NewProc("ImportLayerBegin")
+ procImportLayerNext = modvmcompute.NewProc("ImportLayerNext")
+ procImportLayerWrite = modvmcompute.NewProc("ImportLayerWrite")
+ procImportLayerEnd = modvmcompute.NewProc("ImportLayerEnd")
+ procExportLayerBegin = modvmcompute.NewProc("ExportLayerBegin")
+ procExportLayerNext = modvmcompute.NewProc("ExportLayerNext")
+ procExportLayerRead = modvmcompute.NewProc("ExportLayerRead")
+ procExportLayerEnd = modvmcompute.NewProc("ExportLayerEnd")
+ procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems")
+ procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem")
+ procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem")
+ procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem")
+ procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem")
+ procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem")
+ procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem")
+ procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem")
+ procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem")
+ procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties")
+ procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem")
+ procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback")
+ procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback")
+ procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess")
+ procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess")
+ procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess")
+ procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess")
+ procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo")
+ procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties")
+ procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess")
+ procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties")
+ procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback")
+ procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback")
+ procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings")
+ procHNSCall = modvmcompute.NewProc("HNSCall")
+)
+
+func coTaskMemFree(buffer unsafe.Pointer) {
+ syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0)
+ return
+}
+
+func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
+ r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func activateLayer(info *driverInfo, id string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _activateLayer(info, _p0)
+}
+
+func _activateLayer(info *driverInfo, id *uint16) (hr error) {
+ if hr = procActivateLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(srcId)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(dstId)
+ if hr != nil {
+ return
+ }
+ return _copyLayer(info, _p0, _p1, descriptors)
+}
+
+func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p2 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p2 = &descriptors[0]
+ }
+ if hr = procCopyLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func createLayer(info *driverInfo, id string, parent string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(parent)
+ if hr != nil {
+ return
+ }
+ return _createLayer(info, _p0, _p1)
+}
+
+func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) {
+ if hr = procCreateLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(parent)
+ if hr != nil {
+ return
+ }
+ return _createSandboxLayer(info, _p0, _p1, descriptors)
+}
+
+func _createSandboxLayer(info *driverInfo, id *uint16, parent *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p2 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p2 = &descriptors[0]
+ }
+ if hr = procCreateSandboxLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _expandSandboxSize(info, _p0, size)
+}
+
+func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
+ if hr = procExpandSandboxSize.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func deactivateLayer(info *driverInfo, id string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _deactivateLayer(info, _p0)
+}
+
+func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
+ if hr = procDeactivateLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func destroyLayer(info *driverInfo, id string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _destroyLayer(info, _p0)
+}
+
+func _destroyLayer(info *driverInfo, id *uint16) (hr error) {
+ if hr = procDestroyLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
+ }
+ return _exportLayer(info, _p0, _p1, descriptors)
+}
+
+func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p2 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p2 = &descriptors[0]
+ }
+ if hr = procExportLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _getLayerMountPath(info, _p0, length, buffer)
+}
+
+func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) {
+ if hr = procGetLayerMountPath.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func getBaseImages(buffer **uint16) (hr error) {
+ if hr = procGetBaseImages.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
+ }
+ return _importLayer(info, _p0, _p1, descriptors)
+}
+
+func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p2 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p2 = &descriptors[0]
+ }
+ if hr = procImportLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func layerExists(info *driverInfo, id string, exists *uint32) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _layerExists(info, _p0, exists)
+}
+
+func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) {
+ if hr = procLayerExists.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func nameToGuid(name string, guid *GUID) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(name)
+ if hr != nil {
+ return
+ }
+ return _nameToGuid(_p0, guid)
+}
+
+func _nameToGuid(name *uint16, guid *GUID) (hr error) {
+ if hr = procNameToGuid.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _prepareLayer(info, _p0, descriptors)
+}
+
+func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
+ var _p1 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p1 = &descriptors[0]
+ }
+ if hr = procPrepareLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func unprepareLayer(info *driverInfo, id string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _unprepareLayer(info, _p0)
+}
+
+func _unprepareLayer(info *driverInfo, id *uint16) (hr error) {
+ if hr = procUnprepareLayer.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func processBaseImage(path string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
+ }
+ return _processBaseImage(_p0)
+}
+
+func _processBaseImage(path *uint16) (hr error) {
+ if hr = procProcessBaseImage.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func processUtilityImage(path string) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
+ }
+ return _processUtilityImage(_p0)
+}
+
+func _processUtilityImage(path *uint16) (hr error) {
+ if hr = procProcessUtilityImage.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _importLayerBegin(info, _p0, descriptors, context)
+}
+
+func _importLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
+ var _p1 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p1 = &descriptors[0]
+ }
+ if hr = procImportLayerBegin.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procImportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(fileName)
+ if hr != nil {
+ return
+ }
+ return _importLayerNext(context, _p0, fileInfo)
+}
+
+func _importLayerNext(context uintptr, fileName *uint16, fileInfo *winio.FileBasicInfo) (hr error) {
+ if hr = procImportLayerNext.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procImportLayerNext.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func importLayerWrite(context uintptr, buffer []byte) (hr error) {
+ var _p0 *byte
+ if len(buffer) > 0 {
+ _p0 = &buffer[0]
+ }
+ if hr = procImportLayerWrite.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procImportLayerWrite.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func importLayerEnd(context uintptr) (hr error) {
+ if hr = procImportLayerEnd.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procImportLayerEnd.Addr(), 1, uintptr(context), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _exportLayerBegin(info, _p0, descriptors, context)
+}
+
+func _exportLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
+ var _p1 *WC_LAYER_DESCRIPTOR
+ if len(descriptors) > 0 {
+ _p1 = &descriptors[0]
+ }
+ if hr = procExportLayerBegin.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procExportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) {
+ if hr = procExportLayerNext.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procExportLayerNext.Addr(), 5, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)), uintptr(unsafe.Pointer(fileSize)), uintptr(unsafe.Pointer(deleted)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) {
+ var _p0 *byte
+ if len(buffer) > 0 {
+ _p0 = &buffer[0]
+ }
+ if hr = procExportLayerRead.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procExportLayerRead.Addr(), 4, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)), uintptr(unsafe.Pointer(bytesRead)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func exportLayerEnd(context uintptr) (hr error) {
+ if hr = procExportLayerEnd.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procExportLayerEnd.Addr(), 1, uintptr(context), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(query)
+ if hr != nil {
+ return
+ }
+ return _hcsEnumerateComputeSystems(_p0, computeSystems, result)
+}
+
+func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) {
+ if hr = procHcsEnumerateComputeSystems.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(configuration)
+ if hr != nil {
+ return
+ }
+ return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result)
+}
+
+func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) {
+ if hr = procHcsCreateComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(id)
+ if hr != nil {
+ return
+ }
+ return _hcsOpenComputeSystem(_p0, computeSystem, result)
+}
+
+func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16) (hr error) {
+ if hr = procHcsOpenComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) {
+ if hr = procHcsCloseComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsStartComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
+ if hr = procHcsStartComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsShutdownComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
+ if hr = procHcsShutdownComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsTerminateComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
+ if hr = procHcsTerminateComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsPauseComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
+ if hr = procHcsPauseComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(options)
+ if hr != nil {
+ return
+ }
+ return _hcsResumeComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
+ if hr = procHcsResumeComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(propertyQuery)
+ if hr != nil {
+ return
+ }
+ return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result)
+}
+
+func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
+ if hr = procHcsGetComputeSystemProperties.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(configuration)
+ if hr != nil {
+ return
+ }
+ return _hcsModifyComputeSystem(computeSystem, _p0, result)
+}
+
+func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, result **uint16) (hr error) {
+ if hr = procHcsModifyComputeSystem.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) {
+ if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) {
+ if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(processParameters)
+ if hr != nil {
+ return
+ }
+ return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result)
+}
+
+func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) {
+ if hr = procHcsCreateProcess.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) {
+ if hr = procHcsOpenProcess.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsCloseProcess(process hcsProcess) (hr error) {
+ if hr = procHcsCloseProcess.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) {
+ if hr = procHcsTerminateProcess.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) {
+ if hr = procHcsGetProcessInfo.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) {
+ if hr = procHcsGetProcessProperties.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(settings)
+ if hr != nil {
+ return
+ }
+ return _hcsModifyProcess(process, _p0, result)
+}
+
+func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (hr error) {
+ if hr = procHcsModifyProcess.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(propertyQuery)
+ if hr != nil {
+ return
+ }
+ return _hcsGetServiceProperties(_p0, properties, result)
+}
+
+func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
+ if hr = procHcsGetServiceProperties.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) {
+ if hr = procHcsRegisterProcessCallback.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) {
+ if hr = procHcsUnregisterProcessCallback.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func hcsModifyServiceSettings(settings string, result **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(settings)
+ if hr != nil {
+ return
+ }
+ return _hcsModifyServiceSettings(_p0, result)
+}
+
+func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) {
+ if hr = procHcsModifyServiceSettings.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
+
+func _hnsCall(method string, path string, object string, response **uint16) (hr error) {
+ var _p0 *uint16
+ _p0, hr = syscall.UTF16PtrFromString(method)
+ if hr != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, hr = syscall.UTF16PtrFromString(path)
+ if hr != nil {
+ return
+ }
+ var _p2 *uint16
+ _p2, hr = syscall.UTF16PtrFromString(object)
+ if hr != nil {
+ return
+ }
+ return __hnsCall(_p0, _p1, _p2, response)
+}
+
+func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) {
+ if hr = procHNSCall.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ return
+}
diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE
new file mode 100644
index 000000000..4b9986dea
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2012, Martin Angers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md
new file mode 100644
index 000000000..09e8a32cb
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/README.md
@@ -0,0 +1,187 @@
+# Purell
+
+Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
+
+Based on the [wikipedia article][wiki] and the [RFC 3986 document][rfc].
+
+[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
+
+## Install
+
+`go get github.com/PuerkitoBio/purell`
+
+## Changelog
+
+* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
+* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
+* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
+* **v0.2.0** : Add benchmarks, attempt IDN support.
+* **v0.1.0** : Initial release.
+
+## Examples
+
+From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its functions and constants with "purell."):
+
+```go
+package purell
+
+import (
+ "fmt"
+ "net/url"
+)
+
+func ExampleNormalizeURLString() {
+ if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
+ FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
+ panic(err)
+ } else {
+ fmt.Print(normalized)
+ }
+ // Output: http://somewebsite.com:80/Amazing%3F/url/
+}
+
+func ExampleMustNormalizeURLString() {
+ normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
+ FlagsUnsafeGreedy)
+ fmt.Print(normalized)
+
+ // Output: http://somewebsite.com/Amazing%FA/url
+}
+
+func ExampleNormalizeURL() {
+ if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
+ panic(err)
+ } else {
+ normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
+ fmt.Print(normalized)
+ }
+
+ // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
+}
+```
+
+## API
+
+As seen in the examples above, purell offers three functions, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
+
+```go
+const (
+ // Safe normalizations
+ FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
+ FlagLowercaseHost // http://HOST -> http://host
+ FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
+ FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
+ FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
+ FlagRemoveDefaultPort // http://host:80 -> http://host
+ FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
+
+ // Usually safe normalizations
+ FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
+ FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
+ FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
+
+ // Unsafe normalizations
+ FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
+ FlagRemoveFragment // http://host/path#fragment -> http://host/path
+ FlagForceHTTP // https://host -> http://host
+ FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
+ FlagRemoveWWW // http://www.host/ -> http://host/
+ FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
+ FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
+
+	// Normalizations not in the wikipedia article, required to cover test cases
+ // submitted by jehiah
+ FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
+ FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
+ FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
+ FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
+ FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
+
+ // Convenience set of safe normalizations
+ FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
+
+ // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
+ // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
+
+ // Convenience set of usually safe normalizations (includes FlagsSafe)
+ FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
+ FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
+
+ // Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
+ FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
+ FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
+
+ // Convenience set of all available flags
+ FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+ FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+)
+```
+
+For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
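+
+For example, here is a minimal sketch of a custom set built from the convenience sets (same in-package convention as the examples above):
+
+```go
+// Usually-safe greedy normalizations, but keep trailing slashes.
+custom := FlagsUsuallySafeGreedy &^ FlagRemoveTrailingSlash
+
+if normalized, err := NormalizeURLString("HTTP://Example.com/a/./b/", custom); err == nil {
+	fmt.Print(normalized)
+}
+// Prints: http://example.com/a/b/
+```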
+
+The [full godoc reference is available on gopkgdoc][godoc].
+
+Some things to note:
+
+* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases escapes and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the URL). So these operations cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
+
+* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
+ - %24 -> $
+ - %26 -> &
+ - %2B-%3B -> +,-./0123456789:;
+ - %3D -> =
+ - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ - %5F -> _
+ - %61-%7A -> abcdefghijklmnopqrstuvwxyz
+ - %7E -> ~
+
+
+* When the `NormalizeURL` function is used (passing an URL object), the source URL object is modified in place: after the call, it reflects the normalization (see the short sketch after this list).
+
+* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
+
+* The *remove unused query string parameters* and *remove default query parameters* normalizations are also not implemented, since these are very case-specific, and they are quite trivial to do with an URL object.
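+
+As a short illustration of the in-place modification noted above (a sketch, same in-package convention as the earlier examples):
+
+```go
+u, _ := url.Parse("HTTP://Example.com/path/")
+_ = NormalizeURL(u, FlagsSafe|FlagRemoveTrailingSlash)
+// u itself now carries the result:
+fmt.Print(u.String()) // http://example.com/path
+```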
+
+### Safe vs Usually Safe vs Unsafe
+
+Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
+
+Consider the following URL:
+
+`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+Normalizing with the `FlagsSafe` gives:
+
+`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+With the `FlagsUsuallySafeGreedy`:
+
+`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
+
+And with `FlagsUnsafeGreedy`:
+
+`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
+
+## TODOs
+
+* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
+
+## Thanks / Contributions
+
+@rogpeppe
+@jehiah
+@opennota
+@pchristopher1275
+@zenovich
+@beeker1121
+
+## License
+
+The [BSD 3-Clause license][bsd].
+
+[bsd]: http://opensource.org/licenses/BSD-3-Clause
+[wiki]: http://en.wikipedia.org/wiki/URL_normalization
+[rfc]: http://tools.ietf.org/html/rfc3986#section-6
+[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
+[pr5]: https://github.com/PuerkitoBio/purell/pull/5
+[iss7]: https://github.com/PuerkitoBio/purell/issues/7
diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go
new file mode 100644
index 000000000..645e1b76f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/purell.go
@@ -0,0 +1,379 @@
+/*
+Package purell offers URL normalization as described on the wikipedia page:
+http://en.wikipedia.org/wiki/URL_normalization
+*/
+package purell
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/PuerkitoBio/urlesc"
+ "golang.org/x/net/idna"
+ "golang.org/x/text/unicode/norm"
+ "golang.org/x/text/width"
+)
+
+// NormalizationFlags is a set of normalization flags that determines
+// how a URL will be normalized.
+type NormalizationFlags uint
+
+const (
+ // Safe normalizations
+ FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
+ FlagLowercaseHost // http://HOST -> http://host
+ FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
+ FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
+ FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
+ FlagRemoveDefaultPort // http://host:80 -> http://host
+ FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
+
+ // Usually safe normalizations
+ FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
+ FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
+ FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
+
+ // Unsafe normalizations
+ FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
+ FlagRemoveFragment // http://host/path#fragment -> http://host/path
+ FlagForceHTTP // https://host -> http://host
+ FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
+ FlagRemoveWWW // http://www.host/ -> http://host/
+ FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
+ FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
+
+	// Normalizations not in the wikipedia article, required to cover test cases
+ // submitted by jehiah
+ FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
+ FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
+ FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
+ FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
+ FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
+
+ // Convenience set of safe normalizations
+ FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
+
+ // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
+ // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
+
+ // Convenience set of usually safe normalizations (includes FlagsSafe)
+ FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
+ FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
+
+ // Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
+ FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
+ FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
+
+ // Convenience set of all available flags
+ FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+ FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+)
+
+const (
+ defaultHttpPort = ":80"
+ defaultHttpsPort = ":443"
+)
+
+// Regular expressions used by the normalizations
+var rxPort = regexp.MustCompile(`(:\d+)/?$`)
+var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
+var rxDupSlashes = regexp.MustCompile(`/{2,}`)
+var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
+var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
+var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
+var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
+var rxEmptyPort = regexp.MustCompile(`:+$`)
+
+// Map of flags to implementation function.
+// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
+// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
+
+// Since maps have undefined iteration order, make a slice of ordered keys
+var flagsOrder = []NormalizationFlags{
+ FlagLowercaseScheme,
+ FlagLowercaseHost,
+ FlagRemoveDefaultPort,
+ FlagRemoveDirectoryIndex,
+ FlagRemoveDotSegments,
+ FlagRemoveFragment,
+ FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
+ FlagRemoveDuplicateSlashes,
+ FlagRemoveWWW,
+ FlagAddWWW,
+ FlagSortQuery,
+ FlagDecodeDWORDHost,
+ FlagDecodeOctalHost,
+ FlagDecodeHexHost,
+ FlagRemoveUnnecessaryHostDots,
+ FlagRemoveEmptyPortSeparator,
+ FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
+ FlagAddTrailingSlash,
+}
+
+// ... and then the map, where order is unimportant
+var flags = map[NormalizationFlags]func(*url.URL){
+ FlagLowercaseScheme: lowercaseScheme,
+ FlagLowercaseHost: lowercaseHost,
+ FlagRemoveDefaultPort: removeDefaultPort,
+ FlagRemoveDirectoryIndex: removeDirectoryIndex,
+ FlagRemoveDotSegments: removeDotSegments,
+ FlagRemoveFragment: removeFragment,
+ FlagForceHTTP: forceHTTP,
+ FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
+ FlagRemoveWWW: removeWWW,
+ FlagAddWWW: addWWW,
+ FlagSortQuery: sortQuery,
+ FlagDecodeDWORDHost: decodeDWORDHost,
+ FlagDecodeOctalHost: decodeOctalHost,
+ FlagDecodeHexHost: decodeHexHost,
+	FlagRemoveUnnecessaryHostDots: removeUnnecessaryHostDots,
+ FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
+ FlagRemoveTrailingSlash: removeTrailingSlash,
+ FlagAddTrailingSlash: addTrailingSlash,
+}
+
+// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
+// It takes an URL string as input, as well as the normalization flags.
+func MustNormalizeURLString(u string, f NormalizationFlags) string {
+ result, e := NormalizeURLString(u, f)
+ if e != nil {
+ panic(e)
+ }
+ return result
+}
+
+// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
+// It takes an URL string as input, as well as the normalization flags.
+func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
+ parsed, err := url.Parse(u)
+ if err != nil {
+ return "", err
+ }
+
+ if f&FlagLowercaseHost == FlagLowercaseHost {
+ parsed.Host = strings.ToLower(parsed.Host)
+ }
+
+ // The idna package doesn't fully conform to RFC 5895
+ // (https://tools.ietf.org/html/rfc5895), so we do it here.
+ // Taken from Go 1.8 cycle source, courtesy of bradfitz.
+ // TODO: Remove when (if?) idna package conforms to RFC 5895.
+ parsed.Host = width.Fold.String(parsed.Host)
+ parsed.Host = norm.NFC.String(parsed.Host)
+ if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
+ return "", err
+ }
+
+ return NormalizeURL(parsed, f), nil
+}
+
+// NormalizeURL returns the normalized string.
+// It takes a parsed URL object as input, as well as the normalization flags.
+func NormalizeURL(u *url.URL, f NormalizationFlags) string {
+ for _, k := range flagsOrder {
+ if f&k == k {
+ flags[k](u)
+ }
+ }
+ return urlesc.Escape(u)
+}
+
+func lowercaseScheme(u *url.URL) {
+ if len(u.Scheme) > 0 {
+ u.Scheme = strings.ToLower(u.Scheme)
+ }
+}
+
+func lowercaseHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ u.Host = strings.ToLower(u.Host)
+ }
+}
+
+func removeDefaultPort(u *url.URL) {
+ if len(u.Host) > 0 {
+ scheme := strings.ToLower(u.Scheme)
+ u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
+ if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
+ return ""
+ }
+ return val
+ })
+ }
+}
+
+func removeTrailingSlash(u *url.URL) {
+ if l := len(u.Path); l > 0 {
+ if strings.HasSuffix(u.Path, "/") {
+ u.Path = u.Path[:l-1]
+ }
+ } else if l = len(u.Host); l > 0 {
+ if strings.HasSuffix(u.Host, "/") {
+ u.Host = u.Host[:l-1]
+ }
+ }
+}
+
+func addTrailingSlash(u *url.URL) {
+ if l := len(u.Path); l > 0 {
+ if !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ }
+ } else if l = len(u.Host); l > 0 {
+ if !strings.HasSuffix(u.Host, "/") {
+ u.Host += "/"
+ }
+ }
+}
+
+func removeDotSegments(u *url.URL) {
+ if len(u.Path) > 0 {
+ var dotFree []string
+ var lastIsDot bool
+
+ sections := strings.Split(u.Path, "/")
+ for _, s := range sections {
+ if s == ".." {
+ if len(dotFree) > 0 {
+ dotFree = dotFree[:len(dotFree)-1]
+ }
+ } else if s != "." {
+ dotFree = append(dotFree, s)
+ }
+ lastIsDot = (s == "." || s == "..")
+ }
+ // Special case if host does not end with / and new path does not begin with /
+ u.Path = strings.Join(dotFree, "/")
+ if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
+ u.Path = "/" + u.Path
+ }
+ // Special case if the last segment was a dot, make sure the path ends with a slash
+ if lastIsDot && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ }
+ }
+}
+
+func removeDirectoryIndex(u *url.URL) {
+ if len(u.Path) > 0 {
+ u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
+ }
+}
+
+func removeFragment(u *url.URL) {
+ u.Fragment = ""
+}
+
+func forceHTTP(u *url.URL) {
+ if strings.ToLower(u.Scheme) == "https" {
+ u.Scheme = "http"
+ }
+}
+
+func removeDuplicateSlashes(u *url.URL) {
+ if len(u.Path) > 0 {
+ u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
+ }
+}
+
+func removeWWW(u *url.URL) {
+ if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+ u.Host = u.Host[4:]
+ }
+}
+
+func addWWW(u *url.URL) {
+ if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+ u.Host = "www." + u.Host
+ }
+}
+
+func sortQuery(u *url.URL) {
+ q := u.Query()
+
+ if len(q) > 0 {
+ arKeys := make([]string, len(q))
+ i := 0
+		for k := range q {
+ arKeys[i] = k
+ i++
+ }
+ sort.Strings(arKeys)
+ buf := new(bytes.Buffer)
+ for _, k := range arKeys {
+ sort.Strings(q[k])
+ for _, v := range q[k] {
+ if buf.Len() > 0 {
+ buf.WriteRune('&')
+ }
+ buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
+ }
+ }
+
+ // Rebuild the raw query string
+ u.RawQuery = buf.String()
+ }
+}
+
+func decodeDWORDHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
+ var parts [4]int64
+
+ dword, _ := strconv.ParseInt(matches[1], 10, 0)
+ for i, shift := range []uint{24, 16, 8, 0} {
+ parts[i] = dword >> shift & 0xFF
+ }
+ u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
+ }
+ }
+}
+
+func decodeOctalHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
+ var parts [4]int64
+
+ for i := 1; i <= 4; i++ {
+ parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
+ }
+ u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
+ }
+ }
+}
+
+func decodeHexHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
+ // Conversion is safe because of regex validation
+ parsed, _ := strconv.ParseInt(matches[1], 16, 0)
+ // Set host as DWORD (base 10) encoded host
+ u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
+ // The rest is the same as decoding a DWORD host
+ decodeDWORDHost(u)
+ }
+ }
+}
+
+func removeUnnecessaryHostDots(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
+ // Trim the leading and trailing dots
+ u.Host = strings.Trim(matches[1], ".")
+ if len(matches) > 2 {
+ u.Host += matches[2]
+ }
+ }
+ }
+}
+
+func removeEmptyPortSeparator(u *url.URL) {
+ if len(u.Host) > 0 {
+ u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md
new file mode 100644
index 000000000..bebe305e0
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/README.md
@@ -0,0 +1,16 @@
+urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
+======
+
+Package urlesc implements query escaping as per RFC 3986.
+
+It contains some parts of the net/url package, modified so as to allow
+some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
+
+## Install
+
+ go get github.com/PuerkitoBio/urlesc
+
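+## Example
+
+A minimal usage sketch (illustrative; not part of the upstream README):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/PuerkitoBio/urlesc"
+)
+
+func main() {
+	u, _ := url.Parse("http://example.com/a b")
+	// Escape percent-encodes the space in the path as %20.
+	fmt.Println(urlesc.Escape(u)) // http://example.com/a%20b
+	// QueryEscape escapes a string for use inside a query component.
+	fmt.Println(urlesc.QueryEscape("a b&c")) // a+b%26c
+}
+```
+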
+## License
+
+Go license (BSD-3-Clause)
+
diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
new file mode 100644
index 000000000..1b8462459
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
@@ -0,0 +1,180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package urlesc implements query escaping as per RFC 3986.
+// It contains some parts of the net/url package, modified so as to allow
+// some reserved characters incorrectly escaped by net/url.
+// See https://github.com/golang/go/issues/5684
+package urlesc
+
+import (
+ "bytes"
+ "net/url"
+ "strings"
+)
+
+type encoding int
+
+const (
+ encodePath encoding = 1 + iota
+ encodeUserPassword
+ encodeQueryComponent
+ encodeFragment
+)
+
+// shouldEscape returns true if the specified character should be escaped
+// when appearing in a URL string, according to RFC 3986.
+func shouldEscape(c byte, mode encoding) bool {
+ // §2.3 Unreserved characters (alphanum)
+ if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
+ return false
+ }
+
+ switch c {
+ case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
+ return false
+
+ // §2.2 Reserved characters (reserved)
+ case ':', '/', '?', '#', '[', ']', '@', // gen-delims
+ '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
+ // Different sections of the URL allow a few of
+ // the reserved characters to appear unescaped.
+ switch mode {
+ case encodePath: // §3.3
+ // The RFC allows sub-delims and : @.
+ // '/', '[' and ']' can be used to assign meaning to individual path
+ // segments. This package only manipulates the path as a whole,
+ // so we allow those as well. That leaves only ? and # to escape.
+ return c == '?' || c == '#'
+
+ case encodeUserPassword: // §3.2.1
+ // The RFC allows : and sub-delims in
+ // userinfo. The parsing of userinfo treats ':' as special so we must escape
+ // all the gen-delims.
+ return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
+
+ case encodeQueryComponent: // §3.4
+ // The RFC allows / and ?.
+ return c != '/' && c != '?'
+
+ case encodeFragment: // §4.1
+ // The RFC text is silent but the grammar allows
+ // everything, so escape nothing but #
+ return c == '#'
+ }
+ }
+
+ // Everything else must be escaped.
+ return true
+}
+
+// QueryEscape escapes the string so it can be safely placed
+// inside a URL query.
+func QueryEscape(s string) string {
+ return escape(s, encodeQueryComponent)
+}
+
+func escape(s string, mode encoding) string {
+ spaceCount, hexCount := 0, 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c, mode) {
+ if c == ' ' && mode == encodeQueryComponent {
+ spaceCount++
+ } else {
+ hexCount++
+ }
+ }
+ }
+
+ if spaceCount == 0 && hexCount == 0 {
+ return s
+ }
+
+ t := make([]byte, len(s)+2*hexCount)
+ j := 0
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; {
+ case c == ' ' && mode == encodeQueryComponent:
+ t[j] = '+'
+ j++
+ case shouldEscape(c, mode):
+ t[j] = '%'
+ t[j+1] = "0123456789ABCDEF"[c>>4]
+ t[j+2] = "0123456789ABCDEF"[c&15]
+ j += 3
+ default:
+ t[j] = s[i]
+ j++
+ }
+ }
+ return string(t)
+}
+
+var uiReplacer = strings.NewReplacer(
+ "%21", "!",
+ "%27", "'",
+ "%28", "(",
+ "%29", ")",
+ "%2A", "*",
+)
+
+// unescapeUserinfo unescapes some characters that need not be escaped, as per RFC 3986.
+func unescapeUserinfo(s string) string {
+ return uiReplacer.Replace(s)
+}
+
+// Escape reassembles the URL into a valid URL string.
+// The general form of the result is one of:
+//
+// scheme:opaque
+// scheme://userinfo@host/path?query#fragment
+//
+// If u.Opaque is non-empty, String uses the first form;
+// otherwise it uses the second form.
+//
+// In the second form, the following rules apply:
+// - if u.Scheme is empty, scheme: is omitted.
+// - if u.User is nil, userinfo@ is omitted.
+// - if u.Host is empty, host/ is omitted.
+// - if u.Scheme and u.Host are empty and u.User is nil,
+// the entire scheme://userinfo@host/ is omitted.
+// - if u.Host is non-empty and u.Path begins with a /,
+// the form host/path does not add its own /.
+// - if u.RawQuery is empty, ?query is omitted.
+// - if u.Fragment is empty, #fragment is omitted.
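+//
+// A hypothetical example:
+//
+//	u, _ := url.Parse("http://example.com/a b")
+//	Escape(u) // "http://example.com/a%20b"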
+func Escape(u *url.URL) string {
+ var buf bytes.Buffer
+ if u.Scheme != "" {
+ buf.WriteString(u.Scheme)
+ buf.WriteByte(':')
+ }
+ if u.Opaque != "" {
+ buf.WriteString(u.Opaque)
+ } else {
+ if u.Scheme != "" || u.Host != "" || u.User != nil {
+ buf.WriteString("//")
+ if ui := u.User; ui != nil {
+ buf.WriteString(unescapeUserinfo(ui.String()))
+ buf.WriteByte('@')
+ }
+ if h := u.Host; h != "" {
+ buf.WriteString(h)
+ }
+ }
+ if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
+ buf.WriteByte('/')
+ }
+ buf.WriteString(escape(u.Path, encodePath))
+ }
+ if u.RawQuery != "" {
+ buf.WriteByte('?')
+ buf.WriteString(u.RawQuery)
+ }
+ if u.Fragment != "" {
+ buf.WriteByte('#')
+ buf.WriteString(escape(u.Fragment, encodeFragment))
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 000000000..339177be6
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md
new file mode 100644
index 000000000..fc0577770
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/README.md
@@ -0,0 +1,31 @@
+# Perks for Go (golang.org)
+
+Perks contains the Go package quantile that computes approximate quantiles over
+an unbounded data stream within low memory and CPU bounds.
+
+For more information and examples, see:
+http://godoc.org/github.com/bmizerany/perks
+
+A very special thank you and shout out to Graham Cormode (Rutgers University),
+Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and
+Divesh Srivastava (AT&T Labs–Research) for their research and publication of
+[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf)
+
+Thank you, also:
+* Armon Dadgar (@armon)
+* Andrew Gerrand (@nf)
+* Brad Fitzpatrick (@bradfitz)
+* Keith Rarick (@kr)
+
+FAQ:
+
+Q: Why not move the quantile package into the project root?
+A: I want to add more packages to perks later.
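+
+Example usage (a minimal sketch against this vendored package; the target
+quantiles below are only illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/beorn7/perks/quantile"
+)
+
+func main() {
+	// Track the median and the 99th percentile with given absolute errors.
+	q := quantile.NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
+	for i := 1; i <= 1000; i++ {
+		q.Insert(float64(i))
+	}
+	fmt.Println(q.Query(0.50), q.Query(0.99))
+}
+```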
+
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 000000000..587b1fc5b
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(float64(l) * q)
+ if i > 0 {
+			i--
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+	// all. Unit tests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+			xi--
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE
new file mode 100644
index 000000000..5ba5c86fc
--- /dev/null
+++ b/vendor/github.com/blang/semver/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
new file mode 100644
index 000000000..4399639e2
--- /dev/null
+++ b/vendor/github.com/blang/semver/README.md
@@ -0,0 +1,191 @@
+semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
+======
+
+semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
+
+Usage
+-----
+```bash
+$ go get github.com/blang/semver
+```
+Note: Always vendor your dependencies or pin to a specific version tag.
+
+```go
+import "github.com/blang/semver"
+v1, err := semver.Make("1.0.0-beta")
+v2, err := semver.Make("2.0.0-beta")
+v1.Compare(v2)
+```
+
+Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
+
+Why should I use this lib?
+-----
+
+- Fully spec compatible
+- No reflection
+- No regex
+- Fully tested (Coverage >99%)
+- Readable parsing/validation errors
+- Fast (See [Benchmarks](#benchmarks))
+- Only Stdlib
+- Uses values instead of pointers
+- Many features, see below
+
+
+Features
+-----
+
+- Parsing and validation at all levels
+- Comparator-like comparisons
+- Compare Helper Methods
+- InPlace manipulation
+- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
+- Sortable (implements sort.Interface)
+- database/sql compatible (sql.Scanner/Valuer)
+- encoding/json compatible (json.Marshaler/Unmarshaler)
+
+Ranges
+------
+
+A `Range` is a set of conditions which specify which versions satisfy the range.
+
+A condition is composed of an operator and a version. The supported operators are:
+
+- `<1.0.0` Less than `1.0.0`
+- `<=1.0.0` Less than or equal to `1.0.0`
+- `>1.0.0` Greater than `1.0.0`
+- `>=1.0.0` Greater than or equal to `1.0.0`
+- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
+- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
+
+A `Range` can combine multiple conditions, separated by spaces:
+
+Ranges can be linked by logical AND:
+
+ - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
+ - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
+
+Ranges can also be linked by logical OR:
+
+ - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
+
+AND has a higher precedence than OR. It's not possible to use brackets.
+
+Ranges can be combined by both AND and OR:
+
+ - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+
+Range usage:
+
+```go
+v, err := semver.Parse("1.2.3")
+validRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
+if validRange(v) {
+	// valid
+}
+```
+
+Example
+-----
+
+Have a look at full examples in [examples/main.go](examples/main.go)
+
+```go
+import "github.com/blang/semver"
+
+v, err := semver.Make("0.0.1-alpha.preview+123.github")
+fmt.Printf("Major: %d\n", v.Major)
+fmt.Printf("Minor: %d\n", v.Minor)
+fmt.Printf("Patch: %d\n", v.Patch)
+fmt.Printf("Pre: %s\n", v.Pre)
+fmt.Printf("Build: %s\n", v.Build)
+
+// Prerelease versions array
+if len(v.Pre) > 0 {
+ fmt.Println("Prerelease versions:")
+ for i, pre := range v.Pre {
+ fmt.Printf("%d: %q\n", i, pre)
+ }
+}
+
+// Build meta data array
+if len(v.Build) > 0 {
+ fmt.Println("Build meta data:")
+ for i, build := range v.Build {
+ fmt.Printf("%d: %q\n", i, build)
+ }
+}
+
+v001, err := semver.Make("0.0.1")
+// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
+v001.GT(v) == true
+v.LT(v001) == true
+v.GTE(v) == true
+v.LTE(v) == true
+
+// Or use v.Compare(v2) for comparisons (-1, 0, 1):
+v001.Compare(v) == 1
+v.Compare(v001) == -1
+v.Compare(v) == 0
+
+// Manipulate Version in place:
+v.Pre[0], err = semver.NewPRVersion("beta")
+if err != nil {
+ fmt.Printf("Error parsing pre release version: %q", err)
+}
+
+fmt.Println("\nValidate versions:")
+v.Build[0] = "?"
+
+err = v.Validate()
+if err != nil {
+ fmt.Printf("Validation failed: %s\n", err)
+}
+```
+
+
+Benchmarks
+-----
+
+ BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
+ BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
+ BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
+ BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
+ BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
+ BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
+ BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
+ BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
+ BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
+ BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
+ BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
+ BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
+ BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
+ BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
+ BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
+ BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
+ BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
+
+See benchmark cases at [semver_test.go](semver_test.go)
+
+
+Motivation
+-----
+
+I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
+
+
+Contribution
+-----
+
+Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
+
+
+License
+-----
+
+See [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go
new file mode 100644
index 000000000..a74bf7c44
--- /dev/null
+++ b/vendor/github.com/blang/semver/json.go
@@ -0,0 +1,23 @@
+package semver
+
+import (
+ "encoding/json"
+)
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
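+// A hypothetical round trip: unmarshalling the JSON string "1.2.3-beta"
+// yields the same Version that MarshalJSON serializes back to that string.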
+func (v *Version) UnmarshalJSON(data []byte) (err error) {
+ var versionString string
+
+ if err = json.Unmarshal(data, &versionString); err != nil {
+ return
+ }
+
+ *v, err = Parse(versionString)
+
+ return
+}
diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go
new file mode 100644
index 000000000..fca406d47
--- /dev/null
+++ b/vendor/github.com/blang/semver/range.go
@@ -0,0 +1,416 @@
+package semver
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type wildcardType int
+
+const (
+ noneWildcard wildcardType = iota
+ majorWildcard wildcardType = 1
+ minorWildcard wildcardType = 2
+ patchWildcard wildcardType = 3
+)
+
+func wildcardTypefromInt(i int) wildcardType {
+ switch i {
+ case 1:
+ return majorWildcard
+ case 2:
+ return minorWildcard
+ case 3:
+ return patchWildcard
+ default:
+ return noneWildcard
+ }
+}
+
+type comparator func(Version, Version) bool
+
+var (
+ compEQ comparator = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == 0
+ }
+ compNE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) != 0
+ }
+ compGT = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == 1
+ }
+ compGE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) >= 0
+ }
+ compLT = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) == -1
+ }
+ compLE = func(v1 Version, v2 Version) bool {
+ return v1.Compare(v2) <= 0
+ }
+)
+
+type versionRange struct {
+ v Version
+ c comparator
+}
+
+// rangeFunc creates a Range from the given versionRange.
+func (vr *versionRange) rangeFunc() Range {
+ return Range(func(v Version) bool {
+ return vr.c(v, vr.v)
+ })
+}
+
+// Range represents a range of versions.
+// A Range can be used to check if a Version satisfies it:
+//
+//	validRange, err := semver.ParseRange(">1.0.0 <2.0.0")
+//	validRange(semver.MustParse("1.1.1")) // returns true
+type Range func(Version) bool
+
+// OR combines the existing Range with another Range using logical OR.
+func (rf Range) OR(f Range) Range {
+ return Range(func(v Version) bool {
+ return rf(v) || f(v)
+ })
+}
+
+// AND combines the existing Range with another Range using logical AND.
+func (rf Range) AND(f Range) Range {
+ return Range(func(v Version) bool {
+ return rf(v) && f(v)
+ })
+}
+
+// ParseRange parses a range and returns a Range.
+// If the range could not be parsed an error is returned.
+//
+// Valid ranges are:
+// - "<1.0.0"
+// - "<=1.0.0"
+// - ">1.0.0"
+// - ">=1.0.0"
+// - "1.0.0", "=1.0.0", "==1.0.0"
+// - "!1.0.0", "!=1.0.0"
+//
+// A Range can consist of multiple ranges separated by space:
+// Ranges can be linked by logical AND:
+// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
+// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
+//
+// Ranges can also be linked by logical OR:
+// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
+//
+// AND has a higher precedence than OR. It's not possible to use brackets.
+//
+// Ranges can be combined by both AND and OR
+//
+// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+func ParseRange(s string) (Range, error) {
+ parts := splitAndTrim(s)
+ orParts, err := splitORParts(parts)
+ if err != nil {
+ return nil, err
+ }
+ expandedParts, err := expandWildcardVersion(orParts)
+ if err != nil {
+ return nil, err
+ }
+ var orFn Range
+ for _, p := range expandedParts {
+ var andFn Range
+ for _, ap := range p {
+ opStr, vStr, err := splitComparatorVersion(ap)
+ if err != nil {
+ return nil, err
+ }
+ vr, err := buildVersionRange(opStr, vStr)
+ if err != nil {
+ return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
+ }
+ rf := vr.rangeFunc()
+
+ // Set function
+ if andFn == nil {
+ andFn = rf
+ } else { // Combine with existing function
+ andFn = andFn.AND(rf)
+ }
+ }
+ if orFn == nil {
+ orFn = andFn
+ } else {
+ orFn = orFn.OR(andFn)
+ }
+
+ }
+ return orFn, nil
+}
+
+// splitORParts splits the already cleaned parts by '||'.
+// Checks for invalid positions of the operator and returns an
+// error if found.
+func splitORParts(parts []string) ([][]string, error) {
+ var ORparts [][]string
+ last := 0
+ for i, p := range parts {
+ if p == "||" {
+ if i == 0 {
+ return nil, fmt.Errorf("First element in range is '||'")
+ }
+ ORparts = append(ORparts, parts[last:i])
+ last = i + 1
+ }
+ }
+ if last == len(parts) {
+ return nil, fmt.Errorf("Last element in range is '||'")
+ }
+ ORparts = append(ORparts, parts[last:])
+ return ORparts, nil
+}
+
+// buildVersionRange takes a slice of 2: operator and version
+// and builds a versionRange, otherwise an error.
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+ c := parseComparator(opStr)
+ if c == nil {
+ return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+ }
+ v, err := Parse(vStr)
+ if err != nil {
+ return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+ }
+
+ return &versionRange{
+ v: v,
+ c: c,
+ }, nil
+
+}
+
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+ for _, el := range list {
+ if el == s {
+ return true
+ }
+ }
+ return false
+}
+
+// splitAndTrim splits a range string by spaces and cleans whitespaces
+func splitAndTrim(s string) (result []string) {
+ last := 0
+ var lastChar byte
+ excludeFromSplit := []byte{'>', '<', '='}
+ for i := 0; i < len(s); i++ {
+ if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+ if last < i-1 {
+ result = append(result, s[last:i])
+ }
+ last = i + 1
+ } else if s[i] != ' ' {
+ lastChar = s[i]
+ }
+ }
+ if last < len(s)-1 {
+ result = append(result, s[last:])
+ }
+
+ for i, v := range result {
+ result[i] = strings.Replace(v, " ", "", -1)
+ }
+
+ return
+}
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+ i := strings.IndexFunc(s, unicode.IsDigit)
+ if i == -1 {
+ return "", "", fmt.Errorf("Could not get version from string: %q", s)
+ }
+ return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+ parts := strings.Split(vStr, ".")
+ nparts := len(parts)
+ wildcard := parts[nparts-1]
+
+ possibleWildcardType := wildcardTypefromInt(nparts)
+ if wildcard == "x" {
+ return possibleWildcardType
+ }
+
+ return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+ // handle 1.x.x
+ vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+ vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+ parts := strings.Split(vStr2, ".")
+
+ // handle 1.x
+ if len(parts) == 2 {
+ return vStr2 + ".0"
+ }
+
+ return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return "", err
+ }
+ parts[0] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+ parts := strings.Split(vStr, ".")
+ i, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return "", err
+ }
+ parts[1] = strconv.Itoa(i + 1)
+
+ return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x will become >= 1.2.0
+// <= 1.2.x will become < 1.3.0
+// > 1.2.x will become >= 1.3.0
+// < 1.2.x will become < 1.2.0
+// != 1.2.x will become < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x will become >= 1.0.0
+// <= 1.x will become < 2.0.0
+// > 1.x will become >= 2.0.0
+//   < 1.x will become < 1.0.0
+// != 1.x will become < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x will become >= 1.2.0 < 1.3.0
+// 1.x will become >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+ var expandedParts [][]string
+ for _, p := range parts {
+ var newParts []string
+ for _, ap := range p {
+			if strings.Contains(ap, "x") {
+ opStr, vStr, err := splitComparatorVersion(ap)
+ if err != nil {
+ return nil, err
+ }
+
+ versionWildcardType := getWildcardType(vStr)
+ flatVersion := createVersionFromWildcard(vStr)
+
+ var resultOperator string
+ var shouldIncrementVersion bool
+ switch opStr {
+ case ">":
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ case ">=":
+ resultOperator = ">="
+ case "<":
+ resultOperator = "<"
+ case "<=":
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "", "=", "==":
+ newParts = append(newParts, ">="+flatVersion)
+ resultOperator = "<"
+ shouldIncrementVersion = true
+ case "!=", "!":
+ newParts = append(newParts, "<"+flatVersion)
+ resultOperator = ">="
+ shouldIncrementVersion = true
+ }
+
+ var resultVersion string
+ if shouldIncrementVersion {
+ switch versionWildcardType {
+ case patchWildcard:
+ resultVersion, _ = incrementMinorVersion(flatVersion)
+ case minorWildcard:
+ resultVersion, _ = incrementMajorVersion(flatVersion)
+ }
+ } else {
+ resultVersion = flatVersion
+ }
+
+ ap = resultOperator + resultVersion
+ }
+ newParts = append(newParts, ap)
+ }
+ expandedParts = append(expandedParts, newParts)
+ }
+
+ return expandedParts, nil
+}
+
+func parseComparator(s string) comparator {
+ switch s {
+ case "==":
+ fallthrough
+ case "":
+ fallthrough
+ case "=":
+ return compEQ
+ case ">":
+ return compGT
+ case ">=":
+ return compGE
+ case "<":
+ return compLT
+ case "<=":
+ return compLE
+ case "!":
+ fallthrough
+ case "!=":
+ return compNE
+ }
+
+ return nil
+}
+
+// MustParseRange is like ParseRange but panics if the range cannot be parsed.
+func MustParseRange(s string) Range {
+ r, err := ParseRange(s)
+ if err != nil {
+ panic(`semver: ParseRange(` + s + `): ` + err.Error())
+ }
+ return r
+}
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go
new file mode 100644
index 000000000..8ee0842e6
--- /dev/null
+++ b/vendor/github.com/blang/semver/semver.go
@@ -0,0 +1,418 @@
+package semver
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ numbers string = "0123456789"
+ alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+ alphanum = alphas + numbers
+)
+
+// SpecVersion is the latest fully supported spec version of semver
+var SpecVersion = Version{
+ Major: 2,
+ Minor: 0,
+ Patch: 0,
+}
+
+// Version represents a semver compatible version
+type Version struct {
+ Major uint64
+ Minor uint64
+ Patch uint64
+ Pre []PRVersion
+	Build []string // No precedence
+}
+
+// String returns the string representation of the Version.
+func (v Version) String() string {
+ b := make([]byte, 0, 5)
+ b = strconv.AppendUint(b, v.Major, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Minor, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Patch, 10)
+
+ if len(v.Pre) > 0 {
+ b = append(b, '-')
+ b = append(b, v.Pre[0].String()...)
+
+ for _, pre := range v.Pre[1:] {
+ b = append(b, '.')
+ b = append(b, pre.String()...)
+ }
+ }
+
+ if len(v.Build) > 0 {
+ b = append(b, '+')
+ b = append(b, v.Build[0]...)
+
+ for _, build := range v.Build[1:] {
+ b = append(b, '.')
+ b = append(b, build...)
+ }
+ }
+
+ return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+ return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+ return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+ return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+ if v.Major != o.Major {
+ if v.Major > o.Major {
+ return 1
+ }
+ return -1
+ }
+ if v.Minor != o.Minor {
+ if v.Minor > o.Minor {
+ return 1
+ }
+ return -1
+ }
+ if v.Patch != o.Patch {
+ if v.Patch > o.Patch {
+ return 1
+ }
+ return -1
+ }
+
+ // Quick comparison if a version has no prerelease versions
+ if len(v.Pre) == 0 && len(o.Pre) == 0 {
+ return 0
+ } else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+ return 1
+ } else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+ return -1
+ }
+
+ i := 0
+ for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+ if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+ continue
+ } else if comp == 1 {
+ return 1
+ } else {
+ return -1
+ }
+ }
+
+	// If all compared prerelease versions are equal, the version with additional prerelease versions is greater.
+ if i == len(v.Pre) && i == len(o.Pre) {
+ return 0
+ } else if i == len(v.Pre) && i < len(o.Pre) {
+ return -1
+ } else {
+ return 1
+ }
+
+}
+
+// Validate validates v and returns an error if any part of the version is invalid.
+func (v Version) Validate() error {
+ // Major, Minor, Patch already validated using uint64
+
+ for _, pre := range v.Pre {
+ if !pre.IsNum { //Numeric prerelease versions already uint64
+ if len(pre.VersionStr) == 0 {
+ return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+ }
+ if !containsOnly(pre.VersionStr, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+ }
+ }
+ }
+
+ for _, build := range v.Build {
+ if len(build) == 0 {
+ return fmt.Errorf("Build meta data can not be empty %q", build)
+ }
+ if !containsOnly(build, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+ }
+ }
+
+ return nil
+}
+
+// New is an alias for Parse that returns a pointer: it parses the version string and returns a pointer to a validated Version, or an error.
+func New(s string) (vp *Version, err error) {
+ v, err := Parse(s)
+ vp = &v
+ return
+}
+
+// Make is an alias for Parse, parses version string and returns a validated Version or error
+func Make(s string) (Version, error) {
+ return Parse(s)
+}
+
+// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
+// specs to be parsed by this library. It does so by normalizing versions before passing them to
+// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
+// with only major and minor components specified.
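+// For example, " v1.2 " is normalized to "1.2.0" before parsing (illustrative).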
+func ParseTolerant(s string) (Version, error) {
+ s = strings.TrimSpace(s)
+ s = strings.TrimPrefix(s, "v")
+
+ // Split into major.minor.(patch+pr+meta)
+ parts := strings.SplitN(s, ".", 3)
+ if len(parts) < 3 {
+ if strings.ContainsAny(parts[len(parts)-1], "+-") {
+ return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
+ }
+ for len(parts) < 3 {
+ parts = append(parts, "0")
+ }
+ s = strings.Join(parts, ".")
+ }
+
+ return Parse(s)
+}
+
+// Parse parses version string and returns a validated Version or error
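+// For example, "1.2.3-beta.1+build.42" parses to Major 1, Minor 2, Patch 3,
+// prerelease identifiers "beta" and "1", and build identifiers "build" and "42".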
+func Parse(s string) (Version, error) {
+ if len(s) == 0 {
+ return Version{}, errors.New("Version string empty")
+ }
+
+ // Split into major.minor.(patch+pr+meta)
+ parts := strings.SplitN(s, ".", 3)
+ if len(parts) != 3 {
+ return Version{}, errors.New("No Major.Minor.Patch elements found")
+ }
+
+ // Major
+ if !containsOnly(parts[0], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+ }
+ if hasLeadingZeroes(parts[0]) {
+ return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+ }
+ major, err := strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ // Minor
+ if !containsOnly(parts[1], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+ }
+ if hasLeadingZeroes(parts[1]) {
+ return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+ }
+ minor, err := strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v := Version{}
+ v.Major = major
+ v.Minor = minor
+
+ var build, prerelease []string
+ patchStr := parts[2]
+
+ if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+ build = strings.Split(patchStr[buildIndex+1:], ".")
+ patchStr = patchStr[:buildIndex]
+ }
+
+ if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+ prerelease = strings.Split(patchStr[preIndex+1:], ".")
+ patchStr = patchStr[:preIndex]
+ }
+
+ if !containsOnly(patchStr, numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+ }
+ if hasLeadingZeroes(patchStr) {
+ return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+ }
+ patch, err := strconv.ParseUint(patchStr, 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v.Patch = patch
+
+ // Prerelease
+ for _, prstr := range prerelease {
+ parsedPR, err := NewPRVersion(prstr)
+ if err != nil {
+ return Version{}, err
+ }
+ v.Pre = append(v.Pre, parsedPR)
+ }
+
+ // Build meta data
+ for _, str := range build {
+ if len(str) == 0 {
+ return Version{}, errors.New("Build meta data is empty")
+ }
+ if !containsOnly(str, alphanum) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+ }
+ v.Build = append(v.Build, str)
+ }
+
+ return v, nil
+}
+
+// MustParse is like Parse but panics if the version cannot be parsed.
+func MustParse(s string) Version {
+ v, err := Parse(s)
+ if err != nil {
+ panic(`semver: Parse(` + s + `): ` + err.Error())
+ }
+ return v
+}
+
+// PRVersion represents a PreRelease Version
+type PRVersion struct {
+ VersionStr string
+ VersionNum uint64
+ IsNum bool
+}
+
+// NewPRVersion creates a new valid prerelease version
+func NewPRVersion(s string) (PRVersion, error) {
+ if len(s) == 0 {
+ return PRVersion{}, errors.New("Prerelease is empty")
+ }
+ v := PRVersion{}
+ if containsOnly(s, numbers) {
+ if hasLeadingZeroes(s) {
+ return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
+ }
+ num, err := strconv.ParseUint(s, 10, 64)
+
+ // Might never be hit, but just in case
+ if err != nil {
+ return PRVersion{}, err
+ }
+ v.VersionNum = num
+ v.IsNum = true
+ } else if containsOnly(s, alphanum) {
+ v.VersionStr = s
+ v.IsNum = false
+ } else {
+ return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
+ }
+ return v, nil
+}
+
+// IsNumeric checks if the prerelease version is numeric
+func (v PRVersion) IsNumeric() bool {
+ return v.IsNum
+}
+
+// Compare compares two PreRelease Versions v and o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v PRVersion) Compare(o PRVersion) int {
+ if v.IsNum && !o.IsNum {
+ return -1
+ } else if !v.IsNum && o.IsNum {
+ return 1
+ } else if v.IsNum && o.IsNum {
+ if v.VersionNum == o.VersionNum {
+ return 0
+ } else if v.VersionNum > o.VersionNum {
+ return 1
+ } else {
+ return -1
+ }
+ } else { // both are Alphas
+ if v.VersionStr == o.VersionStr {
+ return 0
+ } else if v.VersionStr > o.VersionStr {
+ return 1
+ } else {
+ return -1
+ }
+ }
+}
+
+// String returns the string representation of the prerelease version.
+func (v PRVersion) String() string {
+ if v.IsNum {
+ return strconv.FormatUint(v.VersionNum, 10)
+ }
+ return v.VersionStr
+}
+
+func containsOnly(s string, set string) bool {
+ return strings.IndexFunc(s, func(r rune) bool {
+ return !strings.ContainsRune(set, r)
+ }) == -1
+}
+
+func hasLeadingZeroes(s string) bool {
+ return len(s) > 1 && s[0] == '0'
+}
+
+// NewBuildVersion creates a new valid build version
+func NewBuildVersion(s string) (string, error) {
+ if len(s) == 0 {
+ return "", errors.New("Buildversion is empty")
+ }
+ if !containsOnly(s, alphanum) {
+ return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
+ }
+ return s, nil
+}
diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go
new file mode 100644
index 000000000..e18f88082
--- /dev/null
+++ b/vendor/github.com/blang/semver/sort.go
@@ -0,0 +1,28 @@
+package semver
+
+import (
+ "sort"
+)
+
+// Versions represents multiple versions.
+type Versions []Version
+
+// Len returns length of version collection
+func (s Versions) Len() int {
+ return len(s)
+}
+
+// Swap swaps two versions inside the collection by their indices
+func (s Versions) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Less checks if version at index i is less than version at index j
+func (s Versions) Less(i, j int) bool {
+ return s[i].LT(s[j])
+}
+
+// Sort sorts a slice of versions
+func Sort(versions []Version) {
+ sort.Sort(Versions(versions))
+}
diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go
new file mode 100644
index 000000000..eb4d80266
--- /dev/null
+++ b/vendor/github.com/blang/semver/sql.go
@@ -0,0 +1,30 @@
+package semver
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements the database/sql.Scanner interface.
+func (v *Version) Scan(src interface{}) (err error) {
+ var str string
+ switch src := src.(type) {
+ case string:
+ str = src
+ case []byte:
+ str = string(src)
+ default:
+ return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
+ }
+
+	// Assign via the named return value so any parse error is propagated.
+	var t Version
+	if t, err = Parse(str); err == nil {
+		*v = t
+	}
+
+ return
+}
+
+// Value implements the database/sql/driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
diff --git a/vendor/github.com/buger/goterm/README.md b/vendor/github.com/buger/goterm/README.md
new file mode 100644
index 000000000..536b7b885
--- /dev/null
+++ b/vendor/github.com/buger/goterm/README.md
@@ -0,0 +1,119 @@
+## Description
+
+This library provides basic building blocks for advanced console UIs.
+
+Initially created for [Gor](http://github.com/buger/gor).
+
+Full API documentation: http://godoc.org/github.com/buger/goterm
+
+## Basic usage
+
+Full screen console app, printing current time:
+
+```go
+import (
+ tm "github.com/buger/goterm"
+ "time"
+)
+
+func main() {
+ tm.Clear() // Clear current screen
+
+ for {
+ // By moving cursor to top-left position we ensure that console output
+		// will be overwritten each time, instead of appended to.
+ tm.MoveCursor(1,1)
+
+ tm.Println("Current Time:", time.Now().Format(time.RFC1123))
+
+ tm.Flush() // Call it every time at the end of rendering
+
+ time.Sleep(time.Second)
+ }
+}
+```
+
+This can be seen in [examples/time_example.go](examples/time_example.go). To
+run it yourself, go into your `$GOPATH/src/github.com/buger/goterm` directory
+and run `go run ./examples/time_example.go`
+
+
+Print red bold message on white background:
+
+```go
+tm.Println(tm.Background(tm.Color(tm.Bold("Important header"), tm.RED), tm.WHITE))
+```
+
+
+Create box and move it to center of the screen:
+
+```go
+tm.Clear()
+
+// Create Box with 30% width of current screen, and height of 20 lines
+box := tm.NewBox(30|tm.PCT, 20, 0)
+
+// Add some content to the box
+// Note that you can add ANY content, even tables
+fmt.Fprint(box, "Some box content")
+
+// Move Box to approx center of the screen
+tm.Print(tm.MoveTo(box.String(), 40|tm.PCT, 40|tm.PCT))
+
+tm.Flush()
+```
+
+This can be found in [examples/box_example.go](examples/box_example.go).
+
+Draw table:
+
+```go
+// Based on http://golang.org/pkg/text/tabwriter
+totals := tm.NewTable(0, 10, 5, ' ', 0)
+fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n")
+fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished)
+tm.Println(totals)
+tm.Flush()
+```
+
+This can be found in [examples/table_example.go](examples/table_example.go).
+
+## Line charts
+
+Chart example:
+
+![screen shot 2013-07-09 at 5 05 37 pm](https://f.cloud.github.com/assets/14009/767676/e3dd35aa-e887-11e2-9cd2-f6451eb26adc.png)
+
+
+```go
+import (
+	"math"
+
+	tm "github.com/buger/goterm"
+)
+
+chart := tm.NewLineChart(100, 20)
+
+data := new(tm.DataTable)
+data.AddColumn("Time")
+data.AddColumn("Sin(x)")
+data.AddColumn("Cos(x+1)")
+
+for i := 0.1; i < 10; i += 0.1 {
+	data.AddRow(i, math.Sin(i), math.Cos(i+1))
+}
+
+tm.Println(chart.Draw(data))
+```
+
+This can be found in [examples/chart_example.go](examples/chart_example.go).
+
+Drawing 2 separate graphs with different scales; each graph has its own Y axis:
+
+```go
+chart.Flags = tm.DRAW_INDEPENDENT
+```
+
+Drawing a graph with a relative scale (the graph is drawn starting from the min value instead of zero):
+
+```go
+chart.Flags = tm.DRAW_RELATIVE
+```
diff --git a/vendor/github.com/buger/goterm/box.go b/vendor/github.com/buger/goterm/box.go
new file mode 100644
index 000000000..7df929d7d
--- /dev/null
+++ b/vendor/github.com/buger/goterm/box.go
@@ -0,0 +1,122 @@
+package goterm
+
+import (
+ "bytes"
+ "strings"
+)
+
+const DEFAULT_BORDER = "- │ ┌ ┐ └ ┘"
+
+// Box allows you to create independent parts of the screen, each with its own
+// buffer and borders. It can be used for creating modal windows.
+//
+// It generates boxes like this:
+// ┌--------┐
+// │hello │
+// │world │
+// │ │
+// └--------┘
+//
+type Box struct {
+ Buf *bytes.Buffer
+
+ Width int
+ Height int
+
+ // To get even padding: PaddingX ~= PaddingY*4
+ PaddingX int
+ PaddingY int
+
+ // Should contain 6 border pieces separated by spaces
+ //
+ // Example border:
+ // "- │ ┌ ┐ └ ┘"
+ Border string
+
+ Flags int // Not used now
+}
+
+// NewBox creates a new Box.
+// Width and height can be relative:
+//
+//    // Create a box with 50% width of the current screen and 10 lines height
+// box := tm.NewBox(50|tm.PCT, 10, 0)
+//
+func NewBox(width, height int, flags int) *Box {
+ width, height = GetXY(width, height)
+
+ box := new(Box)
+ box.Buf = new(bytes.Buffer)
+ box.Width = width
+ box.Height = height
+ box.Border = DEFAULT_BORDER
+ box.PaddingX = 1
+ box.PaddingY = 0
+ box.Flags = flags
+
+ return box
+}
+
+func (b *Box) Write(p []byte) (int, error) {
+ return b.Buf.Write(p)
+}
+
+// String renders the Box into a string.
+func (b *Box) String() (out string) {
+ borders := strings.Split(b.Border, " ")
+ lines := strings.Split(b.Buf.String(), "\n")
+
+ // Border + padding
+ prefix := borders[1] + strings.Repeat(" ", b.PaddingX)
+ suffix := strings.Repeat(" ", b.PaddingX) + borders[1]
+
+ offset := b.PaddingY + 1 // 1 is border width
+
+ // Content width without borders and padding
+ contentWidth := b.Width - (b.PaddingX+1)*2
+
+ for y := 0; y < b.Height; y++ {
+ var line string
+
+ switch {
+ // Draw borders for first line
+ case y == 0:
+ line = borders[2] + strings.Repeat(borders[0], b.Width-2) + borders[3]
+
+ // Draw borders for last line
+ case y == (b.Height - 1):
+ line = borders[4] + strings.Repeat(borders[0], b.Width-2) + borders[5]
+
+ // Draw top and bottom padding
+ case y <= b.PaddingY || y >= (b.Height-b.PaddingY):
+ line = borders[1] + strings.Repeat(" ", b.Width-2) + borders[1]
+
+ // Render content
+ default:
+ if len(lines) > y-offset {
+ line = lines[y-offset]
+ } else {
+ line = ""
+ }
+
+ if len(line) > contentWidth-1 {
+ // If line is too large limit it
+ line = line[0:contentWidth]
+ } else {
+ // If line is too small enlarge it by adding spaces
+ line = line + strings.Repeat(" ", contentWidth-len(line))
+ }
+
+ line = prefix + line + suffix
+ }
+
+ // Don't add newline for last element
+ if y != b.Height-1 {
+ line = line + "\n"
+ }
+
+ out += line
+ }
+
+ return out
+}
diff --git a/vendor/github.com/buger/goterm/plot.go b/vendor/github.com/buger/goterm/plot.go
new file mode 100644
index 000000000..77b9fb097
--- /dev/null
+++ b/vendor/github.com/buger/goterm/plot.go
@@ -0,0 +1,328 @@
+package goterm
+
+import (
+ "fmt"
+ "math"
+ "strings"
+)
+
+const (
+ AXIS_LEFT = iota
+ AXIS_RIGHT
+)
+
+const (
+ DRAW_INDEPENDENT = 1 << iota
+ DRAW_RELATIVE
+)
+
+type DataTable struct {
+ columns []string
+
+ rows [][]float64
+}
+
+func (d *DataTable) AddColumn(name string) {
+ d.columns = append(d.columns, name)
+}
+
+func (d *DataTable) AddRow(elms ...float64) {
+ d.rows = append(d.rows, elms)
+}
+
+type Chart interface {
+ Draw(data DataTable, flags int) string
+}
+
+type LineChart struct {
+ Buf []string
+ chartBuf []string
+
+ data *DataTable
+
+ Width int
+ Height int
+
+ chartHeight int
+ chartWidth int
+
+ paddingX int
+
+ paddingY int
+
+ Flags int
+}
+
+func genBuf(size int) []string {
+ buf := make([]string, size)
+
+ for i := 0; i < size; i++ {
+ buf[i] = " "
+ }
+
+ return buf
+}
+
+// ff formats a float with one decimal place.
+func ff(num interface{}) string {
+ return fmt.Sprintf("%.1f", num)
+}
+
+func NewLineChart(width, height int) *LineChart {
+ chart := new(LineChart)
+ chart.Width = width
+ chart.Height = height
+ chart.Buf = genBuf(width * height)
+
+	// axis lines + axis text
+ chart.paddingY = 2
+
+ return chart
+}
+
+func (c *LineChart) DrawAxes(maxX, minX, maxY, minY float64, index int) {
+ side := AXIS_LEFT
+
+ if c.Flags&DRAW_INDEPENDENT != 0 {
+ if index%2 == 0 {
+ side = AXIS_RIGHT
+ }
+
+ c.DrawLine(c.paddingX-1, 1, c.Width-c.paddingX, 1, "-")
+ } else {
+ c.DrawLine(c.paddingX-1, 1, c.Width-1, 1, "-")
+ }
+
+ if side == AXIS_LEFT {
+ c.DrawLine(c.paddingX-1, 1, c.paddingX-1, c.Height-1, "│")
+ } else {
+ c.DrawLine(c.Width-c.paddingX, 1, c.Width-c.paddingX, c.Height-1, "│")
+ }
+
+ left := 0
+ if side == AXIS_RIGHT {
+ left = c.Width - c.paddingX + 1
+ }
+
+ if c.Flags&DRAW_RELATIVE != 0 {
+ c.writeText(ff(minY), left, 1)
+ } else {
+ if minY > 0 {
+ c.writeText("0", left, 1)
+ } else {
+ c.writeText(ff(minY), left, 1)
+ }
+ }
+
+ c.writeText(ff(maxY), left, c.Height-1)
+
+ c.writeText(ff(minX), c.paddingX, 0)
+
+	x_col := c.data.columns[0]
+	c.writeText(x_col, c.Width/2-len(x_col)/2, 1)
+
+ if c.Flags&DRAW_INDEPENDENT != 0 || len(c.data.columns) < 3 {
+ col := c.data.columns[index]
+
+ for idx, char := range strings.Split(col, "") {
+ start_from := c.Height/2 + len(col)/2 - idx
+
+ if side == AXIS_LEFT {
+ c.writeText(char, c.paddingX-1, start_from)
+ } else {
+ c.writeText(char, c.Width-c.paddingX, start_from)
+ }
+ }
+ }
+
+ if c.Flags&DRAW_INDEPENDENT != 0 {
+ c.writeText(ff(maxX), c.Width-c.paddingX-len(ff(maxX)), 0)
+ } else {
+ c.writeText(ff(maxX), c.Width-len(ff(maxX)), 0)
+ }
+}
+
+func (c *LineChart) writeText(text string, x, y int) {
+ coord := y*c.Width + x
+
+ for idx, char := range strings.Split(text, "") {
+ c.Buf[coord+idx] = char
+ }
+}
+
+func (c *LineChart) Draw(data *DataTable) (out string) {
+ var scaleY, scaleX float64
+
+ c.data = data
+
+ if c.Flags&DRAW_INDEPENDENT != 0 && len(data.columns) > 3 {
+ fmt.Println("Error: Can't use DRAW_INDEPENDENT for more then 2 graphs")
+ return ""
+ }
+
+ charts := len(data.columns) - 1
+
+ prevPoint := [2]int{-1, -1}
+
+ maxX, minX, maxY, minY := getBoundaryValues(data, -1)
+
+ c.paddingX = int(math.Max(float64(len(ff(minY))), float64(len(ff(maxY))))) + 1
+
+ c.chartHeight = c.Height - c.paddingY
+
+ if c.Flags&DRAW_INDEPENDENT != 0 {
+ c.chartWidth = c.Width - 2*c.paddingX
+ } else {
+ c.chartWidth = c.Width - c.paddingX - 1
+ }
+
+ scaleX = float64(c.chartWidth) / (maxX - minX)
+
+ if c.Flags&DRAW_RELATIVE != 0 || minY < 0 {
+ scaleY = float64(c.chartHeight) / (maxY - minY)
+ } else {
+ scaleY = float64(c.chartHeight) / maxY
+ }
+
+ for i := 1; i < charts+1; i++ {
+ if c.Flags&DRAW_INDEPENDENT != 0 {
+ maxX, minX, maxY, minY = getBoundaryValues(data, i)
+
+ scaleX = float64(c.chartWidth-1) / (maxX - minX)
+ scaleY = float64(c.chartHeight) / maxY
+
+ if c.Flags&DRAW_RELATIVE != 0 || minY < 0 {
+ scaleY = float64(c.chartHeight) / (maxY - minY)
+ }
+ }
+
+ symbol := Color("•", i)
+
+ c_data := getChartData(data, i)
+
+ for _, point := range c_data {
+ x := int((point[0]-minX)*scaleX) + c.paddingX
+ y := int((point[1])*scaleY) + c.paddingY
+
+ if c.Flags&DRAW_RELATIVE != 0 || minY < 0 {
+ y = int((point[1]-minY)*scaleY) + c.paddingY
+ }
+
+ if prevPoint[0] == -1 {
+ prevPoint[0] = x
+ prevPoint[1] = y
+ }
+
+ if prevPoint[0] <= x {
+ c.DrawLine(prevPoint[0], prevPoint[1], x, y, symbol)
+ }
+
+ prevPoint[0] = x
+ prevPoint[1] = y
+ }
+
+ c.DrawAxes(maxX, minX, maxY, minY, i)
+ }
+
+ for row := c.Height - 1; row >= 0; row-- {
+ out += strings.Join(c.Buf[row*c.Width:(row+1)*c.Width], "") + "\n"
+ }
+
+ return
+}
+
+func (c *LineChart) DrawLine(x0, y0, x1, y1 int, symbol string) {
+ drawLine(x0, y0, x1, y1, func(x, y int) {
+ coord := y*c.Width + x
+
+		if coord >= 0 && coord < len(c.Buf) {
+ c.Buf[coord] = symbol
+ }
+ })
+}
+
+func getBoundaryValues(data *DataTable, index int) (maxX, minX, maxY, minY float64) {
+ maxX = data.rows[0][0]
+ minX = data.rows[0][0]
+ maxY = data.rows[0][1]
+ minY = data.rows[0][1]
+
+ for _, r := range data.rows {
+ maxX = math.Max(maxX, r[0])
+ minX = math.Min(minX, r[0])
+
+ for idx, c := range r {
+ if idx > 0 {
+ if index == -1 || index == idx {
+ maxY = math.Max(maxY, c)
+ minY = math.Min(minY, c)
+ }
+ }
+ }
+ }
+
+ if maxY > 0 {
+ maxY = maxY * 1.1
+ } else {
+ maxY = maxY * 0.9
+ }
+
+ if minY > 0 {
+ minY = minY * 0.9
+ } else {
+ minY = minY * 1.1
+ }
+
+ return
+}
+
+// A DataTable can hold several series; extract the x column plus the series at the given index
+func getChartData(data *DataTable, index int) (out [][]float64) {
+ for _, r := range data.rows {
+ out = append(out, []float64{r[0], r[index]})
+ }
+
+ return
+}
+
+// Algorithm for drawing line between two points
+//
+// http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
+func drawLine(x0, y0, x1, y1 int, plot func(int, int)) {
+ dx := x1 - x0
+ if dx < 0 {
+ dx = -dx
+ }
+ dy := y1 - y0
+ if dy < 0 {
+ dy = -dy
+ }
+ var sx, sy int
+ if x0 < x1 {
+ sx = 1
+ } else {
+ sx = -1
+ }
+ if y0 < y1 {
+ sy = 1
+ } else {
+ sy = -1
+ }
+ err := dx - dy
+
+ for {
+ plot(x0, y0)
+ if x0 == x1 && y0 == y1 {
+ break
+ }
+ e2 := 2 * err
+ if e2 > -dy {
+ err -= dy
+ x0 += sx
+ }
+ if e2 < dx {
+ err += dx
+ y0 += sy
+ }
+ }
+}
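As a usage sketch (not part of the diff): driving the LineChart API above from an importing program, with the `tm` import alias the goterm README uses and illustrative data values.

```go
package main

import (
	"fmt"
	"math"

	tm "github.com/buger/goterm"
)

func main() {
	chart := tm.NewLineChart(100, 20)

	data := new(tm.DataTable)
	data.AddColumn("Time")
	data.AddColumn("Sin(x)")

	// One row per x value: column 0 is x, column 1 is the series value.
	for i := 0; i < 360; i += 10 {
		x := float64(i)
		data.AddRow(x, math.Sin(x*math.Pi/180))
	}

	fmt.Println(chart.Draw(data))
}
```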
diff --git a/vendor/github.com/buger/goterm/table.go b/vendor/github.com/buger/goterm/table.go
new file mode 100644
index 000000000..d8dae55ce
--- /dev/null
+++ b/vendor/github.com/buger/goterm/table.go
@@ -0,0 +1,34 @@
+package goterm
+
+import (
+ "bytes"
+ "text/tabwriter"
+)
+
+// Tabwriter with its own buffer:
+//
+// totals := tm.NewTable(0, 10, 5, ' ', 0)
+// fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n")
+// fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished)
+// tm.Println(totals)
+//
+// Based on http://golang.org/pkg/text/tabwriter
+type Table struct {
+ tabwriter.Writer
+
+ Buf *bytes.Buffer
+}
+
+// NewTable takes the same parameters as tabwriter.Writer.Init: http://golang.org/pkg/text/tabwriter/#Writer.Init
+func NewTable(minwidth, tabwidth, padding int, padchar byte, flags uint) *Table {
+ tbl := new(Table)
+ tbl.Buf = new(bytes.Buffer)
+ tbl.Init(tbl.Buf, minwidth, tabwidth, padding, padchar, flags)
+
+ return tbl
+}
+
+func (t *Table) String() string {
+ t.Flush()
+ return t.Buf.String()
+}
diff --git a/vendor/github.com/buger/goterm/terminal.go b/vendor/github.com/buger/goterm/terminal.go
new file mode 100644
index 000000000..6b45c78bc
--- /dev/null
+++ b/vendor/github.com/buger/goterm/terminal.go
@@ -0,0 +1,258 @@
+// Package goterm provides basic building blocks for advanced console UIs.
+//
+// Coordinate system:
+//
+// 1/1---X---->
+// |
+// Y
+// |
+// v
+//
+// Documentation for ANSI codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
+//
+// Inspired by: http://www.darkcoding.net/software/pretty-command-line-console-output-on-unix-in-python-and-go-lang/
+package goterm
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// Reset all custom styles
+const RESET = "\033[0m"
+
+// Reset to the default color (note: this escape actually selects green)
+const RESET_COLOR = "\033[32m"
+
+// Return cursor to start of line and clear it
+const RESET_LINE = "\r\033[K"
+
+// List of possible colors
+const (
+ BLACK = iota
+ RED
+ GREEN
+ YELLOW
+ BLUE
+ MAGENTA
+ CYAN
+ WHITE
+)
+
+var Output *bufio.Writer = bufio.NewWriter(os.Stdout)
+
+func getColor(code int) string {
+ return fmt.Sprintf("\033[3%dm", code)
+}
+
+func getBgColor(code int) string {
+ return fmt.Sprintf("\033[4%dm", code)
+}
+
+// Set percent flag: num | PCT
+//
+// Check percent flag: num & PCT
+//
+// Reset percent flag: num & 0xFF
+//
+// shift is 16 on 64-bit platforms and 0 on 32-bit ones, which keeps PCT
+// representable as an int on both.
+const shift = uint(^uint(0)>>63) << 4
+const PCT = 0x8000 << shift
+
+type winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
+
+// Global screen buffer
+// It is not recommended to write to the buffer directly; use the package Print, Printf, and Println functions instead.
+var Screen *bytes.Buffer = new(bytes.Buffer)
+
+// Get relative or absolute coordinates
+// To get relative, set the PCT flag on the number:
+//
+//	// Map 10% of total width to `x`, and use 20 as an absolute `y`
+//	x, y = tm.GetXY(10|tm.PCT, 20)
+//
+func GetXY(x int, y int) (int, int) {
+ if y == -1 {
+ y = CurrentHeight() + 1
+ }
+
+ if x&PCT != 0 {
+ x = int((x & 0xFF) * Width() / 100)
+ }
+
+ if y&PCT != 0 {
+ y = int((y & 0xFF) * Height() / 100)
+ }
+
+ return x, y
+}
+
+type sf func(int, string) string
+
+// Apply given transformation func for each line in string
+func applyTransform(str string, transform sf) (out string) {
+ out = ""
+
+ for idx, line := range strings.Split(str, "\n") {
+ out += transform(idx, line)
+ }
+
+ return
+}
+
+// Clear screen
+func Clear() {
+ Output.WriteString("\033[2J")
+}
+
+// Move cursor to the given position (the ANSI escape takes row before column, i.e. y before x)
+func MoveCursor(x int, y int) {
+	fmt.Fprintf(Screen, "\033[%d;%dH", y, x)
+}
+
+// Move cursor up relative to the current position
+func MoveCursorUp(bias int) {
+ fmt.Fprintf(Screen, "\033[%dA", bias)
+}
+
+// Move cursor down relative to the current position
+func MoveCursorDown(bias int) {
+ fmt.Fprintf(Screen, "\033[%dB", bias)
+}
+
+// Move cursor forward relative to the current position
+func MoveCursorForward(bias int) {
+ fmt.Fprintf(Screen, "\033[%dC", bias)
+}
+
+// Move cursor backward relative to the current position
+func MoveCursorBackward(bias int) {
+ fmt.Fprintf(Screen, "\033[%dD", bias)
+}
+
+// Move string to possition
+func MoveTo(str string, x int, y int) (out string) {
+ x, y = GetXY(x, y)
+
+ return applyTransform(str, func(idx int, line string) string {
+ return fmt.Sprintf("\033[%d;%dH%s", y+idx, x, line)
+ })
+}
+
+// Return the carriage to the start of each line and overwrite its content
+func ResetLine(str string) (out string) {
+	return applyTransform(str, func(idx int, line string) string {
+		return fmt.Sprintf("%s%s", RESET_LINE, line)
+ })
+}
+
+// Make bold
+func Bold(str string) string {
+ return applyTransform(str, func(idx int, line string) string {
+ return fmt.Sprintf("\033[1m%s\033[0m", line)
+ })
+}
+
+// Apply given color to string:
+//
+// tm.Color("RED STRING", tm.RED)
+//
+func Color(str string, color int) string {
+ return applyTransform(str, func(idx int, line string) string {
+ return fmt.Sprintf("%s%s%s", getColor(color), line, RESET)
+ })
+}
+
+func Highlight(str, substr string, color int) string {
+ hiSubstr := Color(substr, color)
+ return strings.Replace(str, substr, hiSubstr, -1)
+}
+
+func HighlightRegion(str string, from, to, color int) string {
+ return str[:from] + Color(str[from:to], color) + str[to:]
+}
+
+// Change background color of string:
+//
+// tm.Background("string", tm.RED)
+//
+func Background(str string, color int) string {
+ return applyTransform(str, func(idx int, line string) string {
+ return fmt.Sprintf("%s%s%s", getBgColor(color), line, RESET)
+ })
+}
+
+// Get console width
+func Width() int {
+ ws, err := getWinsize()
+
+ if err != nil {
+ return -1
+ }
+
+ return int(ws.Col)
+}
+
+// Get console height
+func Height() int {
+ ws, err := getWinsize()
+ if err != nil {
+ return -1
+ }
+ return int(ws.Row)
+}
+
+// Get current height. Line count in Screen buffer.
+func CurrentHeight() int {
+ return strings.Count(Screen.String(), "\n")
+}
+
+// Flush buffer and ensure that it will not overflow screen
+func Flush() {
+ for idx, str := range strings.Split(Screen.String(), "\n") {
+ if idx > Height() {
+ return
+ }
+
+ Output.WriteString(str + "\n")
+ }
+
+ Output.Flush()
+ Screen.Reset()
+}
+
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Fprint(Screen, a...)
+}
+
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(Screen, a...)
+}
+
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(Screen, format, a...)
+}
+
+func Context(data string, idx, max int) string {
+ var start, end int
+
+ if len(data[:idx]) < (max / 2) {
+ start = 0
+ } else {
+ start = idx - max/2
+ }
+
+ if len(data)-idx < (max / 2) {
+ end = len(data) - 1
+ } else {
+ end = idx + max/2
+ }
+
+ return data[start:end]
+}
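A sketch of the render loop these primitives assume (loop body and timing are illustrative): draw into `Screen`, then `Flush` once per frame.

```go
package main

import (
	"time"

	tm "github.com/buger/goterm"
)

func main() {
	tm.Clear() // clear the terminal once up front

	for i := 0; i < 5; i++ {
		tm.MoveCursor(1, 1) // draw from the top-left corner
		tm.Println("Current Time:", time.Now().Format(time.RFC1123))
		tm.Flush() // write the Screen buffer to stdout and reset it
		time.Sleep(time.Second)
	}
}
```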
diff --git a/vendor/github.com/buger/goterm/terminal_nosysioctl.go b/vendor/github.com/buger/goterm/terminal_nosysioctl.go
new file mode 100644
index 000000000..690615008
--- /dev/null
+++ b/vendor/github.com/buger/goterm/terminal_nosysioctl.go
@@ -0,0 +1,12 @@
+// +build windows plan9 solaris
+
+package goterm
+
+func getWinsize() (*winsize, error) {
+ ws := new(winsize)
+
+ ws.Col = 80
+ ws.Row = 24
+
+ return ws, nil
+}
diff --git a/vendor/github.com/buger/goterm/terminal_sysioctl.go b/vendor/github.com/buger/goterm/terminal_sysioctl.go
new file mode 100644
index 000000000..e98430fb9
--- /dev/null
+++ b/vendor/github.com/buger/goterm/terminal_sysioctl.go
@@ -0,0 +1,36 @@
+// +build !windows,!plan9,!solaris
+
+package goterm
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+func getWinsize() (*winsize, error) {
+ ws := new(winsize)
+
+ var _TIOCGWINSZ int64
+
+ switch runtime.GOOS {
+ case "linux":
+ _TIOCGWINSZ = 0x5413
+ case "darwin":
+ _TIOCGWINSZ = 1074295912
+ }
+
+ r1, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
+ uintptr(syscall.Stdin),
+ uintptr(_TIOCGWINSZ),
+ uintptr(unsafe.Pointer(ws)),
+ )
+
+ if int(r1) == -1 {
+ fmt.Println("Error:", os.NewSyscallError("GetWinsize", errno))
+ return nil, os.NewSyscallError("GetWinsize", errno)
+ }
+ return ws, nil
+}
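The hard-coded TIOCGWINSZ values above can alternatively come from golang.org/x/sys/unix, which defines the constant per platform. A sketch of the equivalent query, shown for comparison and not part of this vendored code:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// IoctlGetWinsize wraps the same TIOCGWINSZ ioctl as getWinsize above.
	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	fmt.Printf("%d columns x %d rows\n", ws.Col, ws.Row)
}
```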
diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md
new file mode 100644
index 000000000..69e932a9f
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/README.md
@@ -0,0 +1,112 @@
+# cgroups
+
+[![Build Status](https://travis-ci.org/containerd/cgroups.svg?branch=master)](https://travis-ci.org/containerd/cgroups)
+
+[![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups)
+
+Go package for creating, managing, inspecting, and destroying cgroups.
+The resources format for settings on the cgroup uses the OCI runtime-spec found
+[here](https://github.com/opencontainers/runtime-spec).
+
+## Examples
+
+### Create a new cgroup
+
+This creates a new cgroup using a static path for all subsystems under `/test`.
+
+* /sys/fs/cgroup/cpu/test
+* /sys/fs/cgroup/memory/test
+* etc....
+
+It uses a single hierarchy, the v1 implementation of cgroups, and specifies
+CPU shares as a resource constraint.
+
+
+```go
+shares := uint64(100)
+control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{
+ CPU: &specs.CPU{
+ Shares: &shares,
+ },
+})
+defer control.Delete()
+```
+
+### Create with systemd slice support
+
+
+```go
+control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{
+ CPU: &specs.CPU{
+ Shares: &shares,
+ },
+})
+
+```
+
+### Load an existing cgroup
+
+```go
+control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
+```
+
+### Add a process to the cgroup
+
+```go
+if err := control.Add(cgroups.Process{Pid:1234}); err != nil {
+}
+```
+
+### Update the cgroup
+
+To update the resources applied in the cgroup
+
+```go
+shares = uint64(200)
+if err := control.Update(&specs.LinuxResources{
+ CPU: &specs.CPU{
+ Shares: &shares,
+ },
+}); err != nil {
+}
+```
+
+### Freeze and Thaw the cgroup
+
+```go
+if err := control.Freeze(); err != nil {
+}
+if err := control.Thaw(); err != nil {
+}
+```
+
+### List the processes in the cgroup, optionally recursively
+
+```go
+processes, err := control.Processes(cgroups.Devices, recursive)
+```
+
+### Get Stats on the cgroup
+
+```go
+stats, err := control.Stat()
+```
+
+Passing `cgroups.IgnoreNotExist` ignores any files that do not exist, e.g. swap memory stats when swap is not enabled:
+```go
+stats, err := control.Stat(cgroups.IgnoreNotExist)
+```
+
+### Move process across cgroups
+
+This allows you to take processes from one cgroup and move them to another.
+
+```go
+err := control.MoveTo(destination)
+```
+
+### Create a subcgroup
+
+```go
+subCgroup, err := control.New("child", resources)
+```
diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go
new file mode 100644
index 000000000..e1fd73ce0
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/blkio.go
@@ -0,0 +1,323 @@
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewBlkio(root string) *blkioController {
+ return &blkioController{
+ root: filepath.Join(root, string(Blkio)),
+ }
+}
+
+type blkioController struct {
+ root string
+}
+
+func (b *blkioController) Name() Name {
+ return Blkio
+}
+
+func (b *blkioController) Path(path string) string {
+ return filepath.Join(b.root, path)
+}
+
+func (b *blkioController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ if resources.BlockIO == nil {
+ return nil
+ }
+ for _, t := range createBlkioSettings(resources.BlockIO) {
+ if t.value != nil {
+ if err := ioutil.WriteFile(
+ filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)),
+ t.format(t.value),
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (b *blkioController) Update(path string, resources *specs.LinuxResources) error {
+ return b.Create(path, resources)
+}
+
+func (b *blkioController) Stat(path string, stats *Stats) error {
+ stats.Blkio = &BlkioStat{}
+ settings := []blkioStatSettings{
+ {
+ name: "throttle.io_serviced",
+ entry: &stats.Blkio.IoServicedRecursive,
+ },
+ {
+ name: "throttle.io_service_bytes",
+ entry: &stats.Blkio.IoServiceBytesRecursive,
+ },
+ }
+	// If the CFQ scheduler is enabled, the kernel also exposes the recursive stat files; read those as well
+	if _, err := os.Lstat(filepath.Join(b.Path(path), "blkio.io_serviced_recursive")); err == nil {
+ settings = append(settings,
+ blkioStatSettings{
+ name: "sectors_recursive",
+ entry: &stats.Blkio.SectorsRecursive,
+ },
+ blkioStatSettings{
+ name: "io_service_bytes_recursive",
+ entry: &stats.Blkio.IoServiceBytesRecursive,
+ },
+ blkioStatSettings{
+ name: "io_serviced_recursive",
+ entry: &stats.Blkio.IoServicedRecursive,
+ },
+ blkioStatSettings{
+ name: "io_queued_recursive",
+ entry: &stats.Blkio.IoQueuedRecursive,
+ },
+ blkioStatSettings{
+ name: "io_service_time_recursive",
+ entry: &stats.Blkio.IoServiceTimeRecursive,
+ },
+ blkioStatSettings{
+ name: "io_wait_time_recursive",
+ entry: &stats.Blkio.IoWaitTimeRecursive,
+ },
+ blkioStatSettings{
+ name: "io_merged_recursive",
+ entry: &stats.Blkio.IoMergedRecursive,
+ },
+ blkioStatSettings{
+ name: "time_recursive",
+ entry: &stats.Blkio.IoTimeRecursive,
+ },
+ )
+ }
+
+ devices, err := getDevices("/dev")
+ if err != nil {
+ return err
+ }
+
+ for _, t := range settings {
+ if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]BlkioEntry) error {
+ f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name)))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ if err := sc.Err(); err != nil {
+ return err
+ }
+ // format: dev type amount
+ fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
+ if len(fields) < 3 {
+ if len(fields) == 2 && fields[0] == "Total" {
+ // skip total line
+ continue
+ } else {
+ return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
+ }
+ }
+ major, err := strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return err
+ }
+ minor, err := strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ op := ""
+ valueField := 2
+ if len(fields) == 4 {
+ op = fields[2]
+ valueField = 3
+ }
+ v, err := strconv.ParseUint(fields[valueField], 10, 64)
+ if err != nil {
+ return err
+ }
+ *entry = append(*entry, BlkioEntry{
+ Device: devices[deviceKey{major, minor}],
+ Major: major,
+ Minor: minor,
+ Op: op,
+ Value: v,
+ })
+ }
+ return nil
+}
+
+func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {
+ settings := []blkioSettings{
+ {
+ name: "weight",
+ value: blkio.Weight,
+ format: uintf,
+ },
+ {
+ name: "leaf_weight",
+ value: blkio.LeafWeight,
+ format: uintf,
+ },
+ }
+ for _, wd := range blkio.WeightDevice {
+ settings = append(settings,
+ blkioSettings{
+ name: "weight_device",
+ value: wd,
+ format: weightdev,
+ },
+ blkioSettings{
+ name: "leaf_weight_device",
+ value: wd,
+ format: weightleafdev,
+ })
+ }
+ for _, t := range []struct {
+ name string
+ list []specs.LinuxThrottleDevice
+ }{
+ {
+ name: "throttle.read_bps_device",
+ list: blkio.ThrottleReadBpsDevice,
+ },
+ {
+ name: "throttle.read_iops_device",
+ list: blkio.ThrottleReadIOPSDevice,
+ },
+ {
+ name: "throttle.write_bps_device",
+ list: blkio.ThrottleWriteBpsDevice,
+ },
+ {
+ name: "throttle.write_iops_device",
+ list: blkio.ThrottleWriteIOPSDevice,
+ },
+ } {
+ for _, td := range t.list {
+ settings = append(settings, blkioSettings{
+ name: t.name,
+ value: td,
+ format: throttleddev,
+ })
+ }
+ }
+ return settings
+}
+
+type blkioSettings struct {
+ name string
+ value interface{}
+ format func(v interface{}) []byte
+}
+
+type blkioStatSettings struct {
+ name string
+ entry *[]BlkioEntry
+}
+
+func uintf(v interface{}) []byte {
+ return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10))
+}
+
+func weightdev(v interface{}) []byte {
+ wd := v.(specs.LinuxWeightDevice)
+ return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight))
+}
+
+func weightleafdev(v interface{}) []byte {
+ wd := v.(specs.LinuxWeightDevice)
+ return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight))
+}
+
+func throttleddev(v interface{}) []byte {
+ td := v.(specs.LinuxThrottleDevice)
+ return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate))
+}
+
+func splitBlkioStatLine(r rune) bool {
+ return r == ' ' || r == ':'
+}
+
+type deviceKey struct {
+ major, minor uint64
+}
+
+// getDevices makes a best effort attempt to read all the devices into a map
+// keyed by major and minor number. Since devices may be mapped multiple times,
+// we err on taking the first occurrence.
+func getDevices(path string) (map[deviceKey]string, error) {
+ // TODO(stevvooe): We are ignoring lots of errors. It might be kind of
+ // challenging to debug this if we aren't mapping devices correctly.
+ // Consider logging these errors.
+ devices := map[deviceKey]string{}
+ if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ switch {
+ case fi.IsDir():
+ switch fi.Name() {
+ case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
+ return filepath.SkipDir
+ default:
+ return nil
+ }
+ case fi.Name() == "console":
+ return nil
+ default:
+ if fi.Mode()&os.ModeDevice == 0 {
+ // skip non-devices
+ return nil
+ }
+
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("%s: unable to convert to system stat", p)
+ }
+
+ key := deviceKey{major(st.Rdev), minor(st.Rdev)}
+ if _, ok := devices[key]; ok {
+ return nil // skip it if we have already populated the path.
+ }
+
+ devices[key] = p
+ }
+
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ return devices, nil
+}
+
+func major(devNumber uint64) uint64 {
+ return (devNumber >> 8) & 0xfff
+}
+
+func minor(devNumber uint64) uint64 {
+ return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)
+}
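The major/minor helpers above split a Linux dev_t: the major number sits in bits 8 to 19, the minor in the low 8 bits plus bits 20 and up. A small worked example using copies of the helpers (8:3 is the conventional numbering for /dev/sda3):

```go
package main

import "fmt"

// Copies of the helpers defined in blkio.go above.
func major(devNumber uint64) uint64 { return (devNumber >> 8) & 0xfff }
func minor(devNumber uint64) uint64 { return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00) }

func main() {
	var rdev uint64 = 8<<8 | 3                      // encodes major 8, minor 3
	fmt.Printf("%d:%d\n", major(rdev), minor(rdev)) // prints "8:3"
}
```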
diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go
new file mode 100644
index 000000000..74a324f9e
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cgroup.go
@@ -0,0 +1,389 @@
+package cgroups
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// New returns a new cgroup control for the given hierarchy, path, and resources
+func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources) (Cgroup, error) {
+ subsystems, err := hierarchy()
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range subsystems {
+ if err := initializeSubsystem(s, path, resources); err != nil {
+ return nil, err
+ }
+ }
+ return &cgroup{
+ path: path,
+ subsystems: subsystems,
+ }, nil
+}
+
+// Load will load an existing cgroup and allow it to be controlled
+func Load(hierarchy Hierarchy, path Path) (Cgroup, error) {
+ subsystems, err := hierarchy()
+ if err != nil {
+ return nil, err
+ }
+	// check that the subsystems still exist
+ for _, s := range pathers(subsystems) {
+ p, err := path(s.Name())
+ if err != nil {
+ return nil, err
+ }
+ if _, err := os.Lstat(s.Path(p)); err != nil {
+ if os.IsNotExist(err) {
+ return nil, ErrCgroupDeleted
+ }
+ return nil, err
+ }
+ }
+ return &cgroup{
+ path: path,
+ subsystems: subsystems,
+ }, nil
+}
+
+type cgroup struct {
+ path Path
+
+ subsystems []Subsystem
+ mu sync.Mutex
+ err error
+}
+
+// New returns a new sub cgroup
+func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return nil, c.err
+ }
+ path := subPath(c.path, name)
+ for _, s := range c.subsystems {
+ if err := initializeSubsystem(s, path, resources); err != nil {
+ return nil, err
+ }
+ }
+ return &cgroup{
+ path: path,
+ subsystems: c.subsystems,
+ }, nil
+}
+
+// Subsystems returns all the subsystems that are currently being
+// consumed by the group
+func (c *cgroup) Subsystems() []Subsystem {
+ return c.subsystems
+}
+
+// Add moves the provided process into the new cgroup
+func (c *cgroup) Add(process Process) error {
+ if process.Pid <= 0 {
+ return ErrInvalidPid
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return c.err
+ }
+ return c.add(process)
+}
+
+func (c *cgroup) add(process Process) error {
+ for _, s := range pathers(c.subsystems) {
+ p, err := c.path(s.Name())
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(
+ filepath.Join(s.Path(p), cgroupProcs),
+ []byte(strconv.Itoa(process.Pid)),
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Delete will remove the control group from each of the subsystems registered
+func (c *cgroup) Delete() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return c.err
+ }
+ var errors []string
+ for _, s := range c.subsystems {
+ if d, ok := s.(deleter); ok {
+ sp, err := c.path(s.Name())
+ if err != nil {
+ return err
+ }
+ if err := d.Delete(sp); err != nil {
+ errors = append(errors, string(s.Name()))
+ }
+ continue
+ }
+ if p, ok := s.(pather); ok {
+ sp, err := c.path(s.Name())
+ if err != nil {
+ return err
+ }
+ path := p.Path(sp)
+ if err := remove(path); err != nil {
+ errors = append(errors, path)
+ }
+ }
+ }
+ if len(errors) > 0 {
+ return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", "))
+ }
+ c.err = ErrCgroupDeleted
+ return nil
+}
+
+// Stat returns the current stats for the cgroup
+func (c *cgroup) Stat(handlers ...ErrorHandler) (*Stats, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return nil, c.err
+ }
+ if len(handlers) == 0 {
+ handlers = append(handlers, errPassthrough)
+ }
+ var (
+ stats = &Stats{}
+ wg = &sync.WaitGroup{}
+ errs = make(chan error, len(c.subsystems))
+ )
+ for _, s := range c.subsystems {
+ if ss, ok := s.(stater); ok {
+ sp, err := c.path(s.Name())
+ if err != nil {
+ return nil, err
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := ss.Stat(sp, stats); err != nil {
+ for _, eh := range handlers {
+ if herr := eh(err); herr != nil {
+ errs <- herr
+ }
+ }
+ }
+ }()
+ }
+ }
+ wg.Wait()
+ close(errs)
+ for err := range errs {
+ return nil, err
+ }
+ return stats, nil
+}
+
+// Update updates the cgroup with the new resource values provided
+//
+// Be prepared to handle EBUSY when trying to update a cgroup with
+// live processes and other operations like Stats being performed at the
+// same time
+func (c *cgroup) Update(resources *specs.LinuxResources) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return c.err
+ }
+ for _, s := range c.subsystems {
+ if u, ok := s.(updater); ok {
+ sp, err := c.path(s.Name())
+ if err != nil {
+ return err
+ }
+ if err := u.Update(sp, resources); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Processes returns the processes running inside the cgroup along
+// with the subsystem used, pid, and path
+func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return nil, c.err
+ }
+ return c.processes(subsystem, recursive)
+}
+
+func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) {
+ s := c.getSubsystem(subsystem)
+ sp, err := c.path(subsystem)
+ if err != nil {
+ return nil, err
+ }
+ path := s.(pather).Path(sp)
+ var processes []Process
+ err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !recursive && info.IsDir() {
+ if p == path {
+ return nil
+ }
+ return filepath.SkipDir
+ }
+ dir, name := filepath.Split(p)
+ if name != cgroupProcs {
+ return nil
+ }
+ procs, err := readPids(dir, subsystem)
+ if err != nil {
+ return err
+ }
+ processes = append(processes, procs...)
+ return nil
+ })
+ return processes, err
+}
+
+// Freeze freezes the entire cgroup and all the processes inside it
+func (c *cgroup) Freeze() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return c.err
+ }
+ s := c.getSubsystem(Freezer)
+ if s == nil {
+ return ErrFreezerNotSupported
+ }
+ sp, err := c.path(Freezer)
+ if err != nil {
+ return err
+ }
+ return s.(*freezerController).Freeze(sp)
+}
+
+// Thaw thaws out the cgroup and all the processes inside it
+func (c *cgroup) Thaw() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return c.err
+ }
+ s := c.getSubsystem(Freezer)
+ if s == nil {
+ return ErrFreezerNotSupported
+ }
+ sp, err := c.path(Freezer)
+ if err != nil {
+ return err
+ }
+ return s.(*freezerController).Thaw(sp)
+}
+
+// OOMEventFD returns the memory cgroup's out of memory event fd that triggers
+// when processes inside the cgroup receive an oom event
+func (c *cgroup) OOMEventFD() (uintptr, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return 0, c.err
+ }
+ s := c.getSubsystem(Memory)
+ if s == nil {
+ return 0, ErrMemoryNotSupported
+ }
+ sp, err := c.path(Memory)
+ if err != nil {
+ return 0, err
+ }
+ return s.(*memoryController).OOMEventFD(sp)
+}
+
+// State returns the state of the cgroup and its processes
+func (c *cgroup) State() State {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.checkExists()
+	if c.err == ErrCgroupDeleted {
+ return Deleted
+ }
+ s := c.getSubsystem(Freezer)
+ if s == nil {
+ return Thawed
+ }
+ sp, err := c.path(Freezer)
+ if err != nil {
+ return Unknown
+ }
+ state, err := s.(*freezerController).state(sp)
+ if err != nil {
+ return Unknown
+ }
+ return state
+}
+
+// MoveTo does a recursive move subsystem by subsystem of all the processes
+// inside the group
+func (c *cgroup) MoveTo(destination Cgroup) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err != nil {
+ return c.err
+ }
+ for _, s := range c.subsystems {
+ processes, err := c.processes(s.Name(), true)
+ if err != nil {
+ return err
+ }
+ for _, p := range processes {
+ if err := destination.Add(p); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (c *cgroup) getSubsystem(n Name) Subsystem {
+ for _, s := range c.subsystems {
+ if s.Name() == n {
+ return s
+ }
+ }
+ return nil
+}
+
+func (c *cgroup) checkExists() {
+ for _, s := range pathers(c.subsystems) {
+ p, err := c.path(s.Name())
+ if err != nil {
+ return
+ }
+ if _, err := os.Lstat(s.Path(p)); err != nil {
+ if os.IsNotExist(err) {
+ c.err = ErrCgroupDeleted
+ return
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/containerd/cgroups/control.go b/vendor/github.com/containerd/cgroups/control.go
new file mode 100644
index 000000000..c83b2417f
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/control.go
@@ -0,0 +1,58 @@
+package cgroups
+
+import (
+ "os"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+ cgroupProcs = "cgroup.procs"
+ defaultDirPerm = 0755
+)
+
+// defaultFilePerm is a var so that the test framework can change the filemode
+// of all files created when the tests are running. The difference between the
+// tests and real-world use is that files like "cgroup.procs" already exist when
+// writing to a real cgroup filesystem, but do not exist beforehand in the tests.
+// The test code sets this to a non-zero value.
+var defaultFilePerm = os.FileMode(0)
+
+type Process struct {
+ // Subsystem is the name of the subsystem that the process is in
+ Subsystem Name
+ // Pid is the process id of the process
+ Pid int
+ // Path is the full path of the subsystem and location that the process is in
+ Path string
+}
+
+// Cgroup handles interactions with the individual groups to perform
+// actions on them; it is the main interface to this cgroup package
+type Cgroup interface {
+ // New creates a new cgroup under the calling cgroup
+ New(string, *specs.LinuxResources) (Cgroup, error)
+ // Add adds a process to the cgroup
+ Add(Process) error
+ // Delete removes the cgroup as a whole
+ Delete() error
+ // MoveTo moves all the processes under the calling cgroup to the provided one
+ // subsystems are moved one at a time
+ MoveTo(Cgroup) error
+ // Stat returns the stats for all subsystems in the cgroup
+ Stat(...ErrorHandler) (*Stats, error)
+ // Update updates all the subsystems with the provided resource changes
+ Update(resources *specs.LinuxResources) error
+ // Processes returns all the processes in a select subsystem for the cgroup
+ Processes(Name, bool) ([]Process, error)
+ // Freeze freezes or pauses all processes inside the cgroup
+ Freeze() error
+	// Thaw thaws or resumes all processes inside the cgroup
+ Thaw() error
+ // OOMEventFD returns the memory subsystem's event fd for OOM events
+ OOMEventFD() (uintptr, error)
+ // State returns the cgroups current state
+ State() State
+ // Subsystems returns all the subsystems in the cgroup
+ Subsystems() []Subsystem
+}
diff --git a/vendor/github.com/containerd/cgroups/cpu.go b/vendor/github.com/containerd/cgroups/cpu.go
new file mode 100644
index 000000000..036bb8fd0
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cpu.go
@@ -0,0 +1,120 @@
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewCpu(root string) *cpuController {
+ return &cpuController{
+ root: filepath.Join(root, string(Cpu)),
+ }
+}
+
+type cpuController struct {
+ root string
+}
+
+func (c *cpuController) Name() Name {
+ return Cpu
+}
+
+func (c *cpuController) Path(path string) string {
+ return filepath.Join(c.root, path)
+}
+
+func (c *cpuController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ if cpu := resources.CPU; cpu != nil {
+ for _, t := range []struct {
+ name string
+ ivalue *int64
+ uvalue *uint64
+ }{
+ {
+ name: "rt_period_us",
+ uvalue: cpu.RealtimePeriod,
+ },
+ {
+ name: "rt_runtime_us",
+ ivalue: cpu.RealtimeRuntime,
+ },
+ {
+ name: "shares",
+ uvalue: cpu.Shares,
+ },
+ {
+ name: "cfs_period_us",
+ uvalue: cpu.Period,
+ },
+ {
+ name: "cfs_quota_us",
+ ivalue: cpu.Quota,
+ },
+ } {
+ var value []byte
+ if t.uvalue != nil {
+ value = []byte(strconv.FormatUint(*t.uvalue, 10))
+ } else if t.ivalue != nil {
+ value = []byte(strconv.FormatInt(*t.ivalue, 10))
+ }
+ if value != nil {
+ if err := ioutil.WriteFile(
+ filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)),
+ value,
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (c *cpuController) Update(path string, resources *specs.LinuxResources) error {
+ return c.Create(path, resources)
+}
+
+func (c *cpuController) Stat(path string, stats *Stats) error {
+ f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat"))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // get or create the cpu field because cpuacct can also set values on this struct
+ stats.cpuMu.Lock()
+ cpu := stats.Cpu
+ if cpu == nil {
+ cpu = &CpuStat{}
+ stats.Cpu = cpu
+ }
+ stats.cpuMu.Unlock()
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ if err := sc.Err(); err != nil {
+ return err
+ }
+ key, v, err := parseKV(sc.Text())
+ if err != nil {
+ return err
+ }
+ switch key {
+ case "nr_periods":
+ cpu.Throttling.Periods = v
+ case "nr_throttled":
+ cpu.Throttling.ThrottledPeriods = v
+ case "throttled_time":
+ cpu.Throttling.ThrottledTime = v
+ }
+ }
+ return nil
+}
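As a concrete example of what the Create loop above writes: an OCI quota of 150000 with a period of 100000 lands in cpu.cfs_quota_us and cpu.cfs_period_us, capping the group at 1.5 CPUs. A sketch assuming the runtime-spec v1.0 naming (`specs.LinuxCPU`; older spec revisions, as in the README above, call the struct `specs.CPU`):

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	quota := int64(150000)   // written to cpu.cfs_quota_us
	period := uint64(100000) // written to cpu.cfs_period_us
	resources := &specs.LinuxResources{
		CPU: &specs.LinuxCPU{Quota: &quota, Period: &period},
	}
	// 150000/100000 = 1.5 CPUs worth of runtime per period.
	fmt.Println(*resources.CPU.Quota, *resources.CPU.Period)
}
```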
diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go
new file mode 100644
index 000000000..329234887
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cpuacct.go
@@ -0,0 +1,112 @@
+package cgroups
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const nanosecondsInSecond = 1000000000
+
+var clockTicks = getClockTicks()
+
+func NewCpuacct(root string) *cpuacctController {
+ return &cpuacctController{
+ root: filepath.Join(root, string(Cpuacct)),
+ }
+}
+
+type cpuacctController struct {
+ root string
+}
+
+func (c *cpuacctController) Name() Name {
+ return Cpuacct
+}
+
+func (c *cpuacctController) Path(path string) string {
+ return filepath.Join(c.root, path)
+}
+
+func (c *cpuacctController) Stat(path string, stats *Stats) error {
+ user, kernel, err := c.getUsage(path)
+ if err != nil {
+ return err
+ }
+ total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage"))
+ if err != nil {
+ return err
+ }
+ percpu, err := c.percpuUsage(path)
+ if err != nil {
+ return err
+ }
+ stats.cpuMu.Lock()
+ cpu := stats.Cpu
+ if cpu == nil {
+ cpu = &CpuStat{}
+ stats.Cpu = cpu
+ }
+ stats.cpuMu.Unlock()
+ cpu.Usage.Total = total
+ cpu.Usage.User = user
+ cpu.Usage.Kernel = kernel
+ cpu.Usage.PerCpu = percpu
+ return nil
+}
+
+func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
+ var usage []uint64
+ data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range strings.Fields(string(data)) {
+ u, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ usage = append(usage, u)
+ }
+ return usage, nil
+}
+
+func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) {
+ statPath := filepath.Join(c.Path(path), "cpuacct.stat")
+ data, err := ioutil.ReadFile(statPath)
+ if err != nil {
+ return 0, 0, err
+ }
+ fields := strings.Fields(string(data))
+ if len(fields) != 4 {
+ return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath)
+ }
+ for _, t := range []struct {
+ index int
+ name string
+ value *uint64
+ }{
+ {
+ index: 0,
+ name: "user",
+ value: &user,
+ },
+ {
+ index: 2,
+ name: "system",
+ value: &kernel,
+ },
+ } {
+ if fields[t.index] != t.name {
+ return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath)
+ }
+ v, err := strconv.ParseUint(fields[t.index+1], 10, 64)
+ if err != nil {
+ return 0, 0, err
+ }
+ *t.value = v
+ }
+ return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil
+}
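The final conversion in getUsage turns USER_HZ ticks into nanoseconds; with the common clock-tick value of 100, one tick is 10ms. A worked example (clockTicks is hard-coded here; the real code reads it via getClockTicks):

```go
package main

import "fmt"

func main() {
	const nanosecondsInSecond = 1000000000
	const clockTicks = 100 // assumed USER_HZ; normally from getClockTicks()

	user := uint64(250) // 250 ticks reported in cpuacct.stat
	fmt.Println(user * nanosecondsInSecond / clockTicks) // 2500000000 ns = 2.5s
}
```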
diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go
new file mode 100644
index 000000000..f0f95f569
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cpuset.go
@@ -0,0 +1,139 @@
+package cgroups
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewCputset(root string) *cpusetController {
+ return &cpusetController{
+ root: filepath.Join(root, string(Cpuset)),
+ }
+}
+
+type cpusetController struct {
+ root string
+}
+
+func (c *cpusetController) Name() Name {
+ return Cpuset
+}
+
+func (c *cpusetController) Path(path string) string {
+ return filepath.Join(c.root, path)
+}
+
+func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error {
+ if err := c.ensureParent(c.Path(path), c.root); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil {
+ return err
+ }
+ if resources.CPU != nil {
+ for _, t := range []struct {
+ name string
+ value *string
+ }{
+ {
+ name: "cpus",
+ value: &resources.CPU.Cpus,
+ },
+ {
+ name: "mems",
+ value: &resources.CPU.Mems,
+ },
+ } {
+ if t.value != nil {
+ if err := ioutil.WriteFile(
+ filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)),
+ []byte(*t.value),
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) {
+ if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
+ return
+ }
+ if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
+ return
+ }
+ return cpus, mems, nil
+}
+
+// ensureParent makes sure that the parent directory of current is created
+// and populated with the proper cpus and mems files copied from
+// its parent.
+func (c *cpusetController) ensureParent(current, root string) error {
+ parent := filepath.Dir(current)
+ if _, err := filepath.Rel(root, parent); err != nil {
+ return nil
+ }
+ // Avoid infinite recursion.
+ if parent == current {
+ return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
+ }
+ if cleanPath(parent) != root {
+ if err := c.ensureParent(parent, root); err != nil {
+ return err
+ }
+ }
+ if err := os.MkdirAll(current, defaultDirPerm); err != nil {
+ return err
+ }
+ return c.copyIfNeeded(current, parent)
+}
+
+// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
+// directory to the current directory if the current files are empty
+func (c *cpusetController) copyIfNeeded(current, parent string) error {
+ var (
+ err error
+ currentCpus, currentMems []byte
+ parentCpus, parentMems []byte
+ )
+ if currentCpus, currentMems, err = c.getValues(current); err != nil {
+ return err
+ }
+ if parentCpus, parentMems, err = c.getValues(parent); err != nil {
+ return err
+ }
+ if isEmpty(currentCpus) {
+ if err := ioutil.WriteFile(
+ filepath.Join(current, "cpuset.cpus"),
+ parentCpus,
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ if isEmpty(currentMems) {
+ if err := ioutil.WriteFile(
+ filepath.Join(current, "cpuset.mems"),
+ parentMems,
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func isEmpty(b []byte) bool {
+ return len(bytes.Trim(b, "\n")) == 0
+}
diff --git a/vendor/github.com/containerd/cgroups/devices.go b/vendor/github.com/containerd/cgroups/devices.go
new file mode 100644
index 000000000..f0dca5c54
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/devices.go
@@ -0,0 +1,74 @@
+package cgroups
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+ allowDeviceFile = "devices.allow"
+ denyDeviceFile = "devices.deny"
+ wildcard = -1
+)
+
+func NewDevices(root string) *devicesController {
+ return &devicesController{
+ root: filepath.Join(root, string(Devices)),
+ }
+}
+
+type devicesController struct {
+ root string
+}
+
+func (d *devicesController) Name() Name {
+ return Devices
+}
+
+func (d *devicesController) Path(path string) string {
+ return filepath.Join(d.root, path)
+}
+
+func (d *devicesController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ for _, device := range resources.Devices {
+ file := denyDeviceFile
+ if device.Allow {
+ file = allowDeviceFile
+ }
+ if err := ioutil.WriteFile(
+ filepath.Join(d.Path(path), file),
+ []byte(deviceString(device)),
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *devicesController) Update(path string, resources *specs.LinuxResources) error {
+ return d.Create(path, resources)
+}
+
+func deviceString(device specs.LinuxDeviceCgroup) string {
+	return fmt.Sprintf("%s %s:%s %s",
+		device.Type,
+		deviceNumber(device.Major),
+		deviceNumber(device.Minor),
+		device.Access,
+	)
+}
+
+func deviceNumber(number *int64) string {
+ if number == nil || *number == wildcard {
+ return "*"
+ }
+ return fmt.Sprint(*number)
+}
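deviceString produces the rule syntax the kernel parses from devices.allow and devices.deny: type, major:minor, access. A tiny sketch of the rule that allows read/write/mknod on /dev/null, which is conventionally char device 1:3:

```go
package main

import "fmt"

func main() {
	// Rule for /dev/null: char device, major 1, minor 3, read/write/mknod.
	fmt.Printf("%s %d:%d %s\n", "c", 1, 3, "rwm") // prints "c 1:3 rwm"
}
```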
diff --git a/vendor/github.com/containerd/cgroups/errors.go b/vendor/github.com/containerd/cgroups/errors.go
new file mode 100644
index 000000000..d18b4b1df
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/errors.go
@@ -0,0 +1,31 @@
+package cgroups
+
+import (
+ "errors"
+ "os"
+)
+
+var (
+ ErrInvalidPid = errors.New("cgroups: pid must be greater than 0")
+ ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist")
+ ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed")
+ ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system")
+ ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system")
+ ErrCgroupDeleted = errors.New("cgroups: cgroup deleted")
+	ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination")
+)
+
+// ErrorHandler is a function that handles and acts on errors
+type ErrorHandler func(err error) error
+
+// IgnoreNotExist ignores any errors that are for not existing files
+func IgnoreNotExist(err error) error {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+}
+
+func errPassthrough(err error) error {
+ return err
+}
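Any func(error) error can serve as an ErrorHandler, so callers can make Stat tolerant of more than just missing files. A caller-side sketch (the lenient policy here is illustrative, not part of the package):

```go
package main

import (
	"log"
	"os"

	"github.com/containerd/cgroups"
)

func main() {
	control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
	if err != nil {
		log.Fatal(err)
	}

	// Skip missing or unreadable stat files; any other error aborts Stat.
	lenient := func(err error) error {
		if os.IsNotExist(err) || os.IsPermission(err) {
			log.Printf("cgroups: ignoring %v", err)
			return nil
		}
		return err
	}

	stats, err := control.Stat(lenient)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stats: %+v", stats)
}
```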
diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go
new file mode 100644
index 000000000..f53430b51
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/freezer.go
@@ -0,0 +1,69 @@
+package cgroups
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+func NewFreezer(root string) *freezerController {
+ return &freezerController{
+ root: filepath.Join(root, string(Freezer)),
+ }
+}
+
+type freezerController struct {
+ root string
+}
+
+func (f *freezerController) Name() Name {
+ return Freezer
+}
+
+func (f *freezerController) Path(path string) string {
+ return filepath.Join(f.root, path)
+}
+
+func (f *freezerController) Freeze(path string) error {
+ if err := f.changeState(path, Frozen); err != nil {
+ return err
+ }
+ return f.waitState(path, Frozen)
+}
+
+func (f *freezerController) Thaw(path string) error {
+ if err := f.changeState(path, Thawed); err != nil {
+ return err
+ }
+ return f.waitState(path, Thawed)
+}
+
+func (f *freezerController) changeState(path string, state State) error {
+ return ioutil.WriteFile(
+ filepath.Join(f.root, path, "freezer.state"),
+ []byte(strings.ToUpper(string(state))),
+ defaultFilePerm,
+ )
+}
+
+func (f *freezerController) state(path string) (State, error) {
+ current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state"))
+ if err != nil {
+ return "", err
+ }
+ return State(strings.ToLower(strings.TrimSpace(string(current)))), nil
+}
+
+func (f *freezerController) waitState(path string, state State) error {
+ for {
+ current, err := f.state(path)
+ if err != nil {
+ return err
+ }
+ if current == state {
+ return nil
+ }
+ time.Sleep(1 * time.Millisecond)
+ }
+}
diff --git a/vendor/github.com/containerd/cgroups/hierarchy.go b/vendor/github.com/containerd/cgroups/hierarchy.go
new file mode 100644
index 000000000..b61660d41
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/hierarchy.go
@@ -0,0 +1,4 @@
+package cgroups
+
+// Hierarchy enables both unified and split hierarchies for cgroups
+type Hierarchy func() ([]Subsystem, error)
diff --git a/vendor/github.com/containerd/cgroups/hugetlb.go b/vendor/github.com/containerd/cgroups/hugetlb.go
new file mode 100644
index 000000000..650bb069f
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/hugetlb.go
@@ -0,0 +1,92 @@
+package cgroups
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewHugetlb(root string) (*hugetlbController, error) {
+ sizes, err := hugePageSizes()
+ if err != nil {
+ return nil, err
+ }
+
+ return &hugetlbController{
+ root: filepath.Join(root, string(Hugetlb)),
+ sizes: sizes,
+ }, nil
+}
+
+type hugetlbController struct {
+ root string
+ sizes []string
+}
+
+func (h *hugetlbController) Name() Name {
+ return Hugetlb
+}
+
+func (h *hugetlbController) Path(path string) string {
+ return filepath.Join(h.root, path)
+}
+
+func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ for _, limit := range resources.HugepageLimits {
+ if err := ioutil.WriteFile(
+ filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")),
+ []byte(strconv.FormatUint(limit.Limit, 10)),
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (h *hugetlbController) Stat(path string, stats *Stats) error {
+ stats.Hugetlb = make(map[string]HugetlbStat)
+ for _, size := range h.sizes {
+ s, err := h.readSizeStat(path, size)
+ if err != nil {
+ return err
+ }
+ stats.Hugetlb[size] = s
+ }
+ return nil
+}
+
+func (h *hugetlbController) readSizeStat(path, size string) (HugetlbStat, error) {
+ var s HugetlbStat
+ for _, t := range []struct {
+ name string
+ value *uint64
+ }{
+ {
+ name: "usage_in_bytes",
+ value: &s.Usage,
+ },
+ {
+ name: "max_usage_in_bytes",
+ value: &s.Max,
+ },
+ {
+ name: "failcnt",
+ value: &s.Failcnt,
+ },
+ } {
+ v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, ".")))
+ if err != nil {
+ return s, err
+ }
+ *t.value = v
+ }
+ return s, nil
+}
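Each size returned by `hugePageSizes()` becomes part of a filename, e.g. `hugetlb.2MB.usage_in_bytes`. A short sketch of reading those counters through the exported `Stat` entry point; the mount root and group name are assumptions about the host:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/cgroups"
)

func main() {
	h, err := cgroups.NewHugetlb("/sys/fs/cgroup") // assumed v1 mount root
	if err != nil {
		log.Fatal(err) // e.g. no /sys/kernel/mm/hugepages on this host
	}
	stats := &cgroups.Stats{}
	if err := h.Stat("/mygroup", stats); err != nil { // group name assumed
		log.Fatal(err)
	}
	for size, s := range stats.Hugetlb {
		fmt.Printf("%s: usage=%d max=%d failcnt=%d\n", size, s.Usage, s.Max, s.Failcnt)
	}
}
```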
diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go
new file mode 100644
index 000000000..9e2ddf2e4
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/memory.go
@@ -0,0 +1,304 @@
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewMemory(root string) *memoryController {
+ return &memoryController{
+ root: filepath.Join(root, string(Memory)),
+ }
+}
+
+type memoryController struct {
+ root string
+}
+
+func (m *memoryController) Name() Name {
+ return Memory
+}
+
+func (m *memoryController) Path(path string) string {
+ return filepath.Join(m.root, path)
+}
+
+func (m *memoryController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ if resources.Memory == nil {
+ return nil
+ }
+ if resources.Memory.Kernel != nil {
+ // Check if kernel memory is enabled.
+ // We have to set a kernel memory limit here because it is not accounted
+ // at all until a limit is set on the cgroup, and a limit cannot be set
+ // once the cgroup has children or already contains tasks.
+ for _, i := range []int64{1, -1} {
+ if err := ioutil.WriteFile(
+ filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"),
+ []byte(strconv.FormatInt(i, 10)),
+ defaultFilePerm,
+ ); err != nil {
+ return checkEBUSY(err)
+ }
+ }
+ }
+ return m.set(path, getMemorySettings(resources))
+}
+
+func (m *memoryController) Update(path string, resources *specs.LinuxResources) error {
+ if resources.Memory == nil {
+ return nil
+ }
+ g := func(v *int64) bool {
+ return v != nil && *v > 0
+ }
+ settings := getMemorySettings(resources)
+ if g(resources.Memory.Limit) && g(resources.Memory.Swap) {
+ // If the updated swap value is larger than the current memory limit, apply the
+ // swap change first and then the memory limit, as swap must always exceed the memory limit.
+ current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes"))
+ if err != nil {
+ return err
+ }
+ if current < uint64(*resources.Memory.Swap) {
+ settings[0], settings[1] = settings[1], settings[0]
+ }
+ }
+ return m.set(path, settings)
+}
+
+func (m *memoryController) Stat(path string, stats *Stats) error {
+ f, err := os.Open(filepath.Join(m.Path(path), "memory.stat"))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ stats.Memory = &MemoryStat{}
+ if err := m.parseStats(f, stats.Memory); err != nil {
+ return err
+ }
+ for _, t := range []struct {
+ module string
+ entry *MemoryEntry
+ }{
+ {
+ module: "",
+ entry: &stats.Memory.Usage,
+ },
+ {
+ module: "memsw",
+ entry: &stats.Memory.Swap,
+ },
+ {
+ module: "kmem",
+ entry: &stats.Memory.Kernel,
+ },
+ {
+ module: "kmem.tcp",
+ entry: &stats.Memory.KernelTCP,
+ },
+ } {
+ for _, tt := range []struct {
+ name string
+ value *uint64
+ }{
+ {
+ name: "usage_in_bytes",
+ value: &t.entry.Usage,
+ },
+ {
+ name: "max_usage_in_bytes",
+ value: &t.entry.Max,
+ },
+ {
+ name: "failcnt",
+ value: &t.entry.Failcnt,
+ },
+ {
+ name: "limit_in_bytes",
+ value: &t.entry.Limit,
+ },
+ } {
+ parts := []string{"memory"}
+ if t.module != "" {
+ parts = append(parts, t.module)
+ }
+ parts = append(parts, tt.name)
+ v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, ".")))
+ if err != nil {
+ return err
+ }
+ *tt.value = v
+ }
+ }
+ return nil
+}
+
+func (m *memoryController) OOMEventFD(path string) (uintptr, error) {
+ root := m.Path(path)
+ f, err := os.Open(filepath.Join(root, "memory.oom_control"))
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+ fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.FD_CLOEXEC, 0)
+ if serr != 0 {
+ return 0, serr
+ }
+ if err := writeEventFD(root, f.Fd(), fd); err != nil {
+ unix.Close(int(fd))
+ return 0, err
+ }
+ return fd, nil
+}
+
+func writeEventFD(root string, cfd, efd uintptr) error {
+ f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
+ f.Close()
+ return err
+}
+
+func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error {
+ var (
+ raw = make(map[string]uint64)
+ sc = bufio.NewScanner(r)
+ line int
+ )
+ for sc.Scan() {
+ if err := sc.Err(); err != nil {
+ return err
+ }
+ key, v, err := parseKV(sc.Text())
+ if err != nil {
+ return fmt.Errorf("%d: %v", line, err)
+ }
+ raw[key] = v
+ line++
+ }
+ stat.Cache = raw["cache"]
+ stat.RSS = raw["rss"]
+ stat.RSSHuge = raw["rss_huge"]
+ stat.MappedFile = raw["mapped_file"]
+ stat.Dirty = raw["dirty"]
+ stat.Writeback = raw["writeback"]
+ stat.PgPgIn = raw["pgpgin"]
+ stat.PgPgOut = raw["pgpgout"]
+ stat.PgFault = raw["pgfault"]
+ stat.PgMajFault = raw["pgmajfault"]
+ stat.InactiveAnon = raw["inactive_anon"]
+ stat.ActiveAnon = raw["active_anon"]
+ stat.InactiveFile = raw["inactive_file"]
+ stat.ActiveFile = raw["active_file"]
+ stat.Unevictable = raw["unevictable"]
+ stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"]
+ stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"]
+ stat.TotalCache = raw["total_cache"]
+ stat.TotalRSS = raw["total_rss"]
+ stat.TotalRSSHuge = raw["total_rss_huge"]
+ stat.TotalMappedFile = raw["total_mapped_file"]
+ stat.TotalDirty = raw["total_dirty"]
+ stat.TotalWriteback = raw["total_writeback"]
+ stat.TotalPgPgIn = raw["total_pgpgin"]
+ stat.TotalPgPgOut = raw["total_pgpgout"]
+ stat.TotalPgFault = raw["total_pgfault"]
+ stat.TotalPgMajFault = raw["total_pgmajfault"]
+ stat.TotalInactiveAnon = raw["total_inactive_anon"]
+ stat.TotalActiveAnon = raw["total_active_anon"]
+ stat.TotalInactiveFile = raw["total_inactive_file"]
+ stat.TotalActiveFile = raw["total_active_file"]
+ stat.TotalUnevictable = raw["total_unevictable"]
+ return nil
+}
+
+func (m *memoryController) set(path string, settings []memorySettings) error {
+ for _, t := range settings {
+ if t.value != nil {
+ if err := ioutil.WriteFile(
+ filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)),
+ []byte(strconv.FormatInt(*t.value, 10)),
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type memorySettings struct {
+ name string
+ value *int64
+}
+
+func getMemorySettings(resources *specs.LinuxResources) []memorySettings {
+ mem := resources.Memory
+ var swappiness *int64
+ if mem.Swappiness != nil {
+ v := int64(*mem.Swappiness)
+ swappiness = &v
+ }
+ return []memorySettings{
+ {
+ name: "limit_in_bytes",
+ value: mem.Limit,
+ },
+ {
+ name: "memsw.limit_in_bytes",
+ value: mem.Swap,
+ },
+ {
+ name: "kmem.limit_in_bytes",
+ value: mem.Kernel,
+ },
+ {
+ name: "kmem.tcp.limit_in_bytes",
+ value: mem.KernelTCP,
+ },
+ {
+ name: "oom_control",
+ value: getOomControlValue(mem),
+ },
+ {
+ name: "swappiness",
+ value: swappiness,
+ },
+ }
+}
+
+func checkEBUSY(err error) error {
+ if pathErr, ok := err.(*os.PathError); ok {
+ if errNo, ok := pathErr.Err.(syscall.Errno); ok {
+ if errNo == unix.EBUSY {
+ return fmt.Errorf(
+ "failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children")
+ }
+ }
+ }
+ return err
+}
+
+func getOomControlValue(mem *specs.LinuxMemory) *int64 {
+ if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller {
+ i := int64(1)
+ return &i
+ }
+ return nil
+}
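`OOMEventFD` registers an eventfd with `cgroup.event_control`; the kernel then makes that fd readable whenever the group suffers an OOM kill. A hedged sketch of draining it (the cgroup paths are assumptions; the 8-byte eventfd counter is in host byte order, little-endian assumed here):

```go
package main

import (
	"encoding/binary"
	"log"

	"github.com/containerd/cgroups"
	"golang.org/x/sys/unix"
)

func main() {
	m := cgroups.NewMemory("/sys/fs/cgroup") // assumed v1 mount root
	fd, err := m.OOMEventFD("/mygroup")      // group name assumed
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(int(fd))

	buf := make([]byte, 8) // eventfd reads are always 8 bytes
	if _, err := unix.Read(int(fd), buf); err != nil { // blocks until an OOM kill occurs
		log.Fatal(err)
	}
	log.Printf("OOM kills since last read: %d", binary.LittleEndian.Uint64(buf))
}
```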
diff --git a/vendor/github.com/containerd/cgroups/named.go b/vendor/github.com/containerd/cgroups/named.go
new file mode 100644
index 000000000..f0fbf018e
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/named.go
@@ -0,0 +1,23 @@
+package cgroups
+
+import "path/filepath"
+
+func NewNamed(root string, name Name) *namedController {
+ return &namedController{
+ root: root,
+ name: name,
+ }
+}
+
+type namedController struct {
+ root string
+ name Name
+}
+
+func (n *namedController) Name() Name {
+ return n.name
+}
+
+func (n *namedController) Path(path string) string {
+ return filepath.Join(n.root, string(n.name), path)
+}
diff --git a/vendor/github.com/containerd/cgroups/net_cls.go b/vendor/github.com/containerd/cgroups/net_cls.go
new file mode 100644
index 000000000..1558444bd
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/net_cls.go
@@ -0,0 +1,42 @@
+package cgroups
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewNetCls(root string) *netclsController {
+ return &netclsController{
+ root: filepath.Join(root, string(NetCLS)),
+ }
+}
+
+type netclsController struct {
+ root string
+}
+
+func (n *netclsController) Name() Name {
+ return NetCLS
+}
+
+func (n *netclsController) Path(path string) string {
+ return filepath.Join(n.root, path)
+}
+
+func (n *netclsController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 {
+ return ioutil.WriteFile(
+ filepath.Join(n.Path(path), "net_cls.classid"),
+ []byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)),
+ defaultFilePerm,
+ )
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/cgroups/net_prio.go b/vendor/github.com/containerd/cgroups/net_prio.go
new file mode 100644
index 000000000..0959b8e70
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/net_prio.go
@@ -0,0 +1,50 @@
+package cgroups
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewNetPrio(root string) *netprioController {
+ return &netprioController{
+ root: filepath.Join(root, string(NetPrio)),
+ }
+}
+
+type netprioController struct {
+ root string
+}
+
+func (n *netprioController) Name() Name {
+ return NetPrio
+}
+
+func (n *netprioController) Path(path string) string {
+ return filepath.Join(n.root, path)
+}
+
+func (n *netprioController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ if resources.Network != nil {
+ for _, prio := range resources.Network.Priorities {
+ if err := ioutil.WriteFile(
+ filepath.Join(n.Path(path), "net_prio_ifpriomap"),
+ formatPrio(prio.Name, prio.Priority),
+ defaultFilePerm,
+ ); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func formatPrio(name string, prio uint32) []byte {
+ return []byte(fmt.Sprintf("%s %d", name, prio))
+}
diff --git a/vendor/github.com/containerd/cgroups/paths.go b/vendor/github.com/containerd/cgroups/paths.go
new file mode 100644
index 000000000..1afc24b66
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/paths.go
@@ -0,0 +1,88 @@
+package cgroups
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+)
+
+type Path func(subsystem Name) (string, error)
+
+// RootPath is a Path that maps every subsystem to the root cgroup "/"
+func RootPath(subsystem Name) (string, error) {
+ return "/", nil
+}
+
+// StaticPath returns a static path to use for all cgroups
+func StaticPath(path string) Path {
+ return func(_ Name) (string, error) {
+ return path, nil
+ }
+}
+
+// NestedPath will nest the cgroups based on the calling process's cgroup,
+// placing its child processes inside its own path
+func NestedPath(suffix string) Path {
+ paths, err := parseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return errorPath(err)
+ }
+ return existingPath(paths, suffix)
+}
+
+// PidPath will return the correct cgroup paths for an existing process running inside a cgroup
+// This is commonly used for the Load function to restore an existing container
+func PidPath(pid int) Path {
+ p := fmt.Sprintf("/proc/%d/cgroup", pid)
+ paths, err := parseCgroupFile(p)
+ if err != nil {
+ return errorPath(errors.Wrapf(err, "parse cgroup file %s", p))
+ }
+ return existingPath(paths, "")
+}
+
+func existingPath(paths map[string]string, suffix string) Path {
+ // localize the paths based on the root mount dest for nested cgroups
+ for n, p := range paths {
+ dest, err := getCgroupDestination(n)
+ if err != nil {
+ return errorPath(err)
+ }
+ rel, err := filepath.Rel(dest, p)
+ if err != nil {
+ return errorPath(err)
+ }
+ if rel == "." {
+ rel = dest
+ }
+ paths[n] = filepath.Join("/", rel)
+ }
+ return func(name Name) (string, error) {
+ root, ok := paths[string(name)]
+ if !ok {
+ if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok {
+ return "", fmt.Errorf("unable to find %q in controller set", name)
+ }
+ }
+ if suffix != "" {
+ return filepath.Join(root, suffix), nil
+ }
+ return root, nil
+ }
+}
+
+func subPath(path Path, subName string) Path {
+ return func(name Name) (string, error) {
+ p, err := path(name)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(p, subName), nil
+ }
+}
+
+func errorPath(err error) Path {
+ return func(_ Name) (string, error) {
+ return "", err
+ }
+}
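Because a `Path` is just a function from subsystem name to filesystem path, these helpers compose naturally. A brief sketch with hypothetical group names and pid:

```go
package main

import (
	"fmt"

	"github.com/containerd/cgroups"
)

func main() {
	// Same literal path for every subsystem.
	static := cgroups.StaticPath("/myservice")
	p, _ := static(cgroups.Memory)
	fmt.Println(p) // "/myservice"

	// Resolve the cgroup an existing process already lives in,
	// e.g. to restore a container with Load.
	byPid := cgroups.PidPath(1)
	if p, err := byPid(cgroups.Memory); err == nil {
		fmt.Println(p)
	}
}
```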
diff --git a/vendor/github.com/containerd/cgroups/perf_event.go b/vendor/github.com/containerd/cgroups/perf_event.go
new file mode 100644
index 000000000..0fa43ec1f
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/perf_event.go
@@ -0,0 +1,21 @@
+package cgroups
+
+import "path/filepath"
+
+func NewPerfEvent(root string) *PerfEventController {
+ return &PerfEventController{
+ root: filepath.Join(root, string(PerfEvent)),
+ }
+}
+
+type PerfEventController struct {
+ root string
+}
+
+func (p *PerfEventController) Name() Name {
+ return PerfEvent
+}
+
+func (p *PerfEventController) Path(path string) string {
+ return filepath.Join(p.root, path)
+}
diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go
new file mode 100644
index 000000000..bdcc10a90
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/pids.go
@@ -0,0 +1,69 @@
+package cgroups
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewPids(root string) *pidsController {
+ return &pidsController{
+ root: filepath.Join(root, string(Pids)),
+ }
+}
+
+type pidsController struct {
+ root string
+}
+
+func (p *pidsController) Name() Name {
+ return Pids
+}
+
+func (p *pidsController) Path(path string) string {
+ return filepath.Join(p.root, path)
+}
+
+func (p *pidsController) Create(path string, resources *specs.LinuxResources) error {
+ if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil {
+ return err
+ }
+ if resources.Pids != nil && resources.Pids.Limit > 0 {
+ return ioutil.WriteFile(
+ filepath.Join(p.Path(path), "pids.max"),
+ []byte(strconv.FormatInt(resources.Pids.Limit, 10)),
+ defaultFilePerm,
+ )
+ }
+ return nil
+}
+
+func (p *pidsController) Update(path string, resources *specs.LinuxResources) error {
+ return p.Create(path, resources)
+}
+
+func (p *pidsController) Stat(path string, stats *Stats) error {
+ current, err := readUint(filepath.Join(p.Path(path), "pids.current"))
+ if err != nil {
+ return err
+ }
+ var max uint64
+ maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max"))
+ if err != nil {
+ return err
+ }
+ if maxS := strings.TrimSpace(string(maxData)); maxS != "max" {
+ if max, err = parseUint(maxS, 10, 64); err != nil {
+ return err
+ }
+ }
+ stats.Pids = &PidsStat{
+ Current: current,
+ Limit: max,
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/cgroups/state.go b/vendor/github.com/containerd/cgroups/state.go
new file mode 100644
index 000000000..f1d7b7b60
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/state.go
@@ -0,0 +1,12 @@
+package cgroups
+
+// State is a type that represents the state of the current cgroup
+type State string
+
+const (
+ Unknown State = ""
+ Thawed State = "thawed"
+ Frozen State = "frozen"
+ Freezing State = "freezing"
+ Deleted State = "deleted"
+)
diff --git a/vendor/github.com/containerd/cgroups/stats.go b/vendor/github.com/containerd/cgroups/stats.go
new file mode 100644
index 000000000..47fbfa96b
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/stats.go
@@ -0,0 +1,109 @@
+package cgroups
+
+import "sync"
+
+type Stats struct {
+ cpuMu sync.Mutex
+
+ Hugetlb map[string]HugetlbStat
+ Pids *PidsStat
+ Cpu *CpuStat
+ Memory *MemoryStat
+ Blkio *BlkioStat
+}
+
+type HugetlbStat struct {
+ Usage uint64
+ Max uint64
+ Failcnt uint64
+}
+
+type PidsStat struct {
+ Current uint64
+ Limit uint64
+}
+
+type CpuStat struct {
+ Usage CpuUsage
+ Throttling Throttle
+}
+
+type CpuUsage struct {
+ // Units: nanoseconds.
+ Total uint64
+ PerCpu []uint64
+ Kernel uint64
+ User uint64
+}
+
+type Throttle struct {
+ Periods uint64
+ ThrottledPeriods uint64
+ ThrottledTime uint64
+}
+
+type MemoryStat struct {
+ Cache uint64
+ RSS uint64
+ RSSHuge uint64
+ MappedFile uint64
+ Dirty uint64
+ Writeback uint64
+ PgPgIn uint64
+ PgPgOut uint64
+ PgFault uint64
+ PgMajFault uint64
+ InactiveAnon uint64
+ ActiveAnon uint64
+ InactiveFile uint64
+ ActiveFile uint64
+ Unevictable uint64
+ HierarchicalMemoryLimit uint64
+ HierarchicalSwapLimit uint64
+ TotalCache uint64
+ TotalRSS uint64
+ TotalRSSHuge uint64
+ TotalMappedFile uint64
+ TotalDirty uint64
+ TotalWriteback uint64
+ TotalPgPgIn uint64
+ TotalPgPgOut uint64
+ TotalPgFault uint64
+ TotalPgMajFault uint64
+ TotalInactiveAnon uint64
+ TotalActiveAnon uint64
+ TotalInactiveFile uint64
+ TotalActiveFile uint64
+ TotalUnevictable uint64
+
+ Usage MemoryEntry
+ Swap MemoryEntry
+ Kernel MemoryEntry
+ KernelTCP MemoryEntry
+}
+
+type MemoryEntry struct {
+ Limit uint64
+ Usage uint64
+ Max uint64
+ Failcnt uint64
+}
+
+type BlkioStat struct {
+ IoServiceBytesRecursive []BlkioEntry
+ IoServicedRecursive []BlkioEntry
+ IoQueuedRecursive []BlkioEntry
+ IoServiceTimeRecursive []BlkioEntry
+ IoWaitTimeRecursive []BlkioEntry
+ IoMergedRecursive []BlkioEntry
+ IoTimeRecursive []BlkioEntry
+ SectorsRecursive []BlkioEntry
+}
+
+type BlkioEntry struct {
+ Op string
+ Device string
+ Major uint64
+ Minor uint64
+ Value uint64
+}
diff --git a/vendor/github.com/containerd/cgroups/subsystem.go b/vendor/github.com/containerd/cgroups/subsystem.go
new file mode 100644
index 000000000..aab403b8a
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/subsystem.go
@@ -0,0 +1,94 @@
+package cgroups
+
+import (
+ "fmt"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Name is a typed name for a cgroup subsystem
+type Name string
+
+const (
+ Devices Name = "devices"
+ Hugetlb Name = "hugetlb"
+ Freezer Name = "freezer"
+ Pids Name = "pids"
+ NetCLS Name = "net_cls"
+ NetPrio Name = "net_prio"
+ PerfEvent Name = "perf_event"
+ Cpuset Name = "cpuset"
+ Cpu Name = "cpu"
+ Cpuacct Name = "cpuacct"
+ Memory Name = "memory"
+ Blkio Name = "blkio"
+)
+
+// Subsystems returns a complete list of the default cgroups
+// available on most Linux systems
+func Subsystems() []Name {
+ n := []Name{
+ Hugetlb,
+ Freezer,
+ Pids,
+ NetCLS,
+ NetPrio,
+ PerfEvent,
+ Cpuset,
+ Cpu,
+ Cpuacct,
+ Memory,
+ Blkio,
+ }
+ if !isUserNS {
+ n = append(n, Devices)
+ }
+ return n
+}
+
+type Subsystem interface {
+ Name() Name
+}
+
+type pather interface {
+ Subsystem
+ Path(path string) string
+}
+
+type creator interface {
+ Subsystem
+ Create(path string, resources *specs.LinuxResources) error
+}
+
+type deleter interface {
+ Subsystem
+ Delete(path string) error
+}
+
+type stater interface {
+ Subsystem
+ Stat(path string, stats *Stats) error
+}
+
+type updater interface {
+ Subsystem
+ Update(path string, resources *specs.LinuxResources) error
+}
+
+// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy
+func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy {
+ return func() ([]Subsystem, error) {
+ subsystems, err := baseHierarchy()
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range subsystems {
+ if s.Name() == subsystem {
+ return []Subsystem{
+ s,
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("unable to find subsystem %s", subsystem)
+ }
+}
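`SingleSubsystem` is useful when a caller needs exactly one controller out of a hierarchy. A sketch using the package's own `V1` hierarchy (defined later in this vendor drop); only the error handling is illustrative:

```go
package main

import (
	"log"

	"github.com/containerd/cgroups"
)

func main() {
	// Restrict the default v1 hierarchy to the freezer controller only.
	freezerOnly := cgroups.SingleSubsystem(cgroups.V1, cgroups.Freezer)
	subsystems, err := freezerOnly()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("using %d subsystem(s): %s", len(subsystems), subsystems[0].Name())
}
```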
diff --git a/vendor/github.com/containerd/cgroups/systemd.go b/vendor/github.com/containerd/cgroups/systemd.go
new file mode 100644
index 000000000..5e052a821
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/systemd.go
@@ -0,0 +1,101 @@
+package cgroups
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ systemdDbus "github.com/coreos/go-systemd/dbus"
+ "github.com/godbus/dbus"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+ SystemdDbus Name = "systemd"
+ defaultSlice = "system.slice"
+)
+
+func Systemd() ([]Subsystem, error) {
+ root, err := v1MountPoint()
+ if err != nil {
+ return nil, err
+ }
+ defaultSubsystems, err := defaults(root)
+ if err != nil {
+ return nil, err
+ }
+ s, err := NewSystemd(root)
+ if err != nil {
+ return nil, err
+ }
+ // make sure the systemd controller is added first
+ return append([]Subsystem{s}, defaultSubsystems...), nil
+}
+
+func Slice(slice, name string) Path {
+ if slice == "" {
+ slice = defaultSlice
+ }
+ return func(subsystem Name) (string, error) {
+ return filepath.Join(slice, unitName(name)), nil
+ }
+}
+
+func NewSystemd(root string) (*SystemdController, error) {
+ conn, err := systemdDbus.New()
+ if err != nil {
+ return nil, err
+ }
+ return &SystemdController{
+ root: root,
+ conn: conn,
+ }, nil
+}
+
+type SystemdController struct {
+ mu sync.Mutex
+ conn *systemdDbus.Conn
+ root string
+}
+
+func (s *SystemdController) Name() Name {
+ return SystemdDbus
+}
+
+func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error {
+ slice, name := splitName(path)
+ properties := []systemdDbus.Property{
+ systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
+ systemdDbus.PropWants(slice),
+ newProperty("DefaultDependencies", false),
+ newProperty("Delegate", true),
+ newProperty("MemoryAccounting", true),
+ newProperty("CPUAccounting", true),
+ newProperty("BlockIOAccounting", true),
+ }
+ _, err := s.conn.StartTransientUnit(name, "replace", properties, nil)
+ return err
+}
+
+func (s *SystemdController) Delete(path string) error {
+ _, name := splitName(path)
+ _, err := s.conn.StopUnit(name, "replace", nil)
+ return err
+}
+
+func newProperty(name string, units interface{}) systemdDbus.Property {
+ return systemdDbus.Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+func unitName(name string) string {
+ return fmt.Sprintf("%s.slice", name)
+}
+
+func splitName(path string) (slice string, unit string) {
+ slice, unit = filepath.Split(path)
+ return strings.TrimSuffix(slice, "/"), unit
+}
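Under the systemd hierarchy a cgroup is addressed as a transient unit, `<slice>/<name>.slice`, rather than a raw directory, which is what `splitName` and `unitName` translate between. A sketch of resolving such a path with `Slice` (slice and unit names are illustrative):

```go
package main

import (
	"log"

	"github.com/containerd/cgroups"
)

func main() {
	// Yields "system.slice/mycontainer.slice" for any subsystem.
	path := cgroups.Slice("system.slice", "mycontainer")
	p, err := path(cgroups.SystemdDbus)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("systemd cgroup path:", p)
}
```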
diff --git a/vendor/github.com/containerd/cgroups/ticks.go b/vendor/github.com/containerd/cgroups/ticks.go
new file mode 100644
index 000000000..f471c5ae7
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/ticks.go
@@ -0,0 +1,10 @@
+package cgroups
+
+func getClockTicks() uint64 {
+ // The value comes from `C.sysconf(C._SC_CLK_TCK)`, and
+ // on Linux it's a constant which is safe to be hard coded,
+ // so we can avoid using cgo here.
+ // See https://github.com/containerd/cgroups/pull/12 for
+ // more details.
+ return 100
+}
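Values in `cpuacct.stat` are reported in USER_HZ ticks, so a caller of `getClockTicks` converts them to nanoseconds along these lines. A sketch only; `ticksToNanoseconds` is a hypothetical helper, not part of this file:

```go
package cgroups

// ticksToNanoseconds is a hypothetical sketch of the conversion a caller of
// getClockTicks would perform: at 100 ticks per second, each USER_HZ tick
// from files such as cpuacct.stat represents 10ms, i.e. 10,000,000ns.
func ticksToNanoseconds(ticks uint64) uint64 {
	return ticks * (1000000000 / getClockTicks())
}
```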
diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go
new file mode 100644
index 000000000..d5c7230a0
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/utils.go
@@ -0,0 +1,280 @@
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ units "github.com/docker/go-units"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+var isUserNS = runningInUserNS()
+
+// runningInUserNS detects whether we are currently running in a user namespace.
+// Copied from github.com/lxc/lxd/shared/util.go
+func runningInUserNS() bool {
+ file, err := os.Open("/proc/self/uid_map")
+ if err != nil {
+ // This kernel-provided file only exists if user namespaces are supported
+ return false
+ }
+ defer file.Close()
+
+ buf := bufio.NewReader(file)
+ l, _, err := buf.ReadLine()
+ if err != nil {
+ return false
+ }
+
+ line := string(l)
+ var a, b, c int64
+ fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
+ /*
+ * We assume we are in the initial user namespace if we have a full
+ * range - 4294967295 uids starting at uid 0.
+ */
+ if a == 0 && b == 0 && c == 4294967295 {
+ return false
+ }
+ return true
+}
+
+// defaults returns all known groups
+func defaults(root string) ([]Subsystem, error) {
+ h, err := NewHugetlb(root)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ s := []Subsystem{
+ NewNamed(root, "systemd"),
+ NewFreezer(root),
+ NewPids(root),
+ NewNetCls(root),
+ NewNetPrio(root),
+ NewPerfEvent(root),
+ NewCputset(root),
+ NewCpu(root),
+ NewCpuacct(root),
+ NewMemory(root),
+ NewBlkio(root),
+ }
+ // only add the devices cgroup if we are not in a user namespace
+ // because modifications are not allowed
+ if !isUserNS {
+ s = append(s, NewDevices(root))
+ }
+ // add the hugetlb cgroup if the error wasn't due to missing hugetlb
+ // cgroup support on the host
+ if err == nil {
+ s = append(s, h)
+ }
+ return s, nil
+}
+
+// remove will remove a cgroup path, handling EAGAIN and EBUSY errors by
+// retrying the removal with an exponentially increasing backoff
+func remove(path string) error {
+ delay := 10 * time.Millisecond
+ for i := 0; i < 5; i++ {
+ if i != 0 {
+ time.Sleep(delay)
+ delay *= 2
+ }
+ if err := os.RemoveAll(path); err == nil {
+ return nil
+ }
+ }
+ return fmt.Errorf("cgroups: unable to remove path %q", path)
+}
+
+// readPids will read all the pids in a cgroup by the provided path
+func readPids(path string, subsystem Name) ([]Process, error) {
+ f, err := os.Open(filepath.Join(path, cgroupProcs))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ var (
+ out []Process
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ if t := s.Text(); t != "" {
+ pid, err := strconv.Atoi(t)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, Process{
+ Pid: pid,
+ Subsystem: subsystem,
+ Path: path,
+ })
+ }
+ }
+ return out, nil
+}
+
+func hugePageSizes() ([]string, error) {
+ var (
+ pageSizes []string
+ sizeList = []string{"B", "kB", "MB", "GB", "TB", "PB"}
+ )
+ files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
+ if err != nil {
+ return nil, err
+ }
+ for _, st := range files {
+ nameArray := strings.Split(st.Name(), "-")
+ pageSize, err := units.RAMInBytes(nameArray[1])
+ if err != nil {
+ return nil, err
+ }
+ pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList))
+ }
+ return pageSizes, nil
+}
+
+func readUint(path string) (uint64, error) {
+ v, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return parseUint(strings.TrimSpace(string(v)), 10, 64)
+}
+
+func parseUint(s string, base, bitSize int) (uint64, error) {
+ v, err := strconv.ParseUint(s, base, bitSize)
+ if err != nil {
+ intValue, intErr := strconv.ParseInt(s, base, bitSize)
+ // Treat negative values as 0, whether they parse as a negative
+ // int64 (case 1) or overflow below MinInt64 (case 2).
+ if intErr == nil && intValue < 0 {
+ return 0, nil
+ } else if intErr != nil &&
+ intErr.(*strconv.NumError).Err == strconv.ErrRange &&
+ intValue < 0 {
+ return 0, nil
+ }
+ return 0, err
+ }
+ return v, nil
+}
+
+func parseKV(raw string) (string, uint64, error) {
+ parts := strings.Fields(raw)
+ switch len(parts) {
+ case 2:
+ v, err := parseUint(parts[1], 10, 64)
+ if err != nil {
+ return "", 0, err
+ }
+ return parts[0], v, nil
+ default:
+ return "", 0, ErrInvalidFormat
+ }
+}
+
+func parseCgroupFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return parseCgroupFromReader(f)
+}
+
+func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
+ var (
+ cgroups = make(map[string]string)
+ s = bufio.NewScanner(r)
+ )
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ var (
+ text = s.Text()
+ parts = strings.SplitN(text, ":", 3)
+ )
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid cgroup entry: %q", text)
+ }
+ for _, subs := range strings.Split(parts[1], ",") {
+ if subs != "" {
+ cgroups[subs] = parts[2]
+ }
+ }
+ }
+ return cgroups, nil
+}
+
+func getCgroupDestination(subsystem string) (string, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return "", err
+ }
+ fields := strings.Fields(s.Text())
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[3], nil
+ }
+ }
+ }
+ return "", ErrNoCgroupMountDestination
+}
+
+func pathers(subsystems []Subsystem) []pather {
+ var out []pather
+ for _, s := range subsystems {
+ if p, ok := s.(pather); ok {
+ out = append(out, p)
+ }
+ }
+ return out
+}
+
+func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error {
+ if c, ok := s.(creator); ok {
+ p, err := path(s.Name())
+ if err != nil {
+ return err
+ }
+ if err := c.Create(p, resources); err != nil {
+ return err
+ }
+ } else if c, ok := s.(pather); ok {
+ p, err := path(s.Name())
+ if err != nil {
+ return err
+ }
+ // do the default create if the group does not have a custom one
+ if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func cleanPath(path string) string {
+ if path == "" {
+ return ""
+ }
+ path = filepath.Clean(path)
+ if !filepath.IsAbs(path) {
+ path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path))
+ }
+ return filepath.Clean(path)
+}
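`parseCgroupFromReader` maps each comma-joined controller in a `/proc/<pid>/cgroup` line onto its path. Since the function is unexported, a sketch exercising it would live in the package's own tests; the sample lines below are illustrative:

```go
package cgroups

import (
	"strings"
	"testing"
)

func TestParseCgroupFromReader(t *testing.T) {
	// Two illustrative /proc/self/cgroup entries; the comma-joined
	// "cpu,cpuacct" controllers fan out into two map keys.
	sample := "4:cpu,cpuacct:/system.slice/sshd.service\n" +
		"7:memory:/system.slice/sshd.service\n"
	paths, err := parseCgroupFromReader(strings.NewReader(sample))
	if err != nil {
		t.Fatal(err)
	}
	for _, k := range []string{"cpu", "cpuacct", "memory"} {
		if paths[k] != "/system.slice/sshd.service" {
			t.Fatalf("unexpected path for %s: %q", k, paths[k])
		}
	}
}
```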
diff --git a/vendor/github.com/containerd/cgroups/v1.go b/vendor/github.com/containerd/cgroups/v1.go
new file mode 100644
index 000000000..f6608f7c7
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/v1.go
@@ -0,0 +1,65 @@
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy
+func V1() ([]Subsystem, error) {
+ root, err := v1MountPoint()
+ if err != nil {
+ return nil, err
+ }
+ subsystems, err := defaults(root)
+ if err != nil {
+ return nil, err
+ }
+ var enabled []Subsystem
+ for _, s := range pathers(subsystems) {
+ // check and remove the default groups that do not exist
+ if _, err := os.Lstat(s.Path("/")); err == nil {
+ enabled = append(enabled, s)
+ }
+ }
+ return enabled, nil
+}
+
+// v1MountPoint returns the mount point where the cgroup
+// mountpoints are mounted in a single hierarchy
+func v1MountPoint() (string, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+ var (
+ text = scanner.Text()
+ fields = strings.Split(text, " ")
+ // safe as mountinfo encodes mountpoints with spaces as \040.
+ index = strings.Index(text, " - ")
+ postSeparatorFields = strings.Fields(text[index+3:])
+ numPostFields = len(postSeparatorFields)
+ )
+ // this is an error as we can't detect if the mount is for "cgroup"
+ if numPostFields == 0 {
+ return "", fmt.Errorf("Found no fields post '-' in %q", text)
+ }
+ if postSeparatorFields[0] == "cgroup" {
+ // check that the mount is properly formatted.
+ if numPostFields < 3 {
+ return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+ return filepath.Dir(fields[4]), nil
+ }
+ }
+ return "", ErrMountPointNotExist
+}
diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE
new file mode 100644
index 000000000..8f71f43fe
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/containernetworking/cni/README.md b/vendor/github.com/containernetworking/cni/README.md
new file mode 100644
index 000000000..27f08506b
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/README.md
@@ -0,0 +1,176 @@
+[![Build Status](https://travis-ci.org/containernetworking/cni.svg?branch=master)](https://travis-ci.org/containernetworking/cni)
+[![Coverage Status](https://coveralls.io/repos/github/containernetworking/cni/badge.svg?branch=master)](https://coveralls.io/github/containernetworking/cni?branch=master)
+[![Slack Status](https://cryptic-tundra-43194.herokuapp.com/badge.svg)](https://cryptic-tundra-43194.herokuapp.com/)
+
+# CNI - the Container Network Interface
+
+## What is CNI?
+
+The CNI (_Container Network Interface_) project consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of supported plugins.
+CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted.
+Because of this focus, CNI has a wide range of support and the specification is simple to implement.
+
+As well as the [specification](SPEC.md), this repository contains the Go source code of a library for integrating CNI into applications, an example command-line tool, a template for making new plugins, and the supported plugins.
+
+The template code makes it straight-forward to create a CNI plugin for an existing container networking project.
+CNI also makes a good framework for creating a new container networking project from scratch.
+
+## Why develop CNI?
+
+Application containers on Linux are a rapidly evolving area, and within this area networking is not well addressed as it is highly environment-specific.
+We believe that many container runtimes and orchestrators will seek to solve the same problem of making the network layer pluggable.
+
+To avoid duplication, we think it is prudent to define a common interface between the network plugins and container execution: hence we put forward this specification, along with libraries for Go and a set of plugins.
+
+## Who is using CNI?
+### Container runtimes
+- [rkt - container engine](https://coreos.com/blog/rkt-cni-networking.html)
+- [Kurma - container runtime](http://kurma.io/)
+- [Kubernetes - a system to simplify container operations](http://kubernetes.io/docs/admin/network-plugins/)
+- [Cloud Foundry - a platform for cloud applications](https://github.com/cloudfoundry-incubator/netman-release)
+- [Mesos - a distributed systems kernel](https://github.com/apache/mesos/blob/master/docs/cni.md)
+
+### 3rd party plugins
+- [Project Calico - a layer 3 virtual network](https://github.com/projectcalico/calico-cni)
+- [Weave - a multi-host Docker network](https://github.com/weaveworks/weave)
+- [Contiv Networking - policy networking for various use cases](https://github.com/contiv/netplugin)
+- [SR-IOV](https://github.com/hustcat/sriov-cni)
+- [Cilium - BPF & XDP for containers](https://github.com/cilium/cilium)
+- [Infoblox - enterprise IP address management for containers](https://github.com/infobloxopen/cni-infoblox)
+
+The CNI team also maintains some [core plugins](plugins).
+
+
+## Contributing to CNI
+
+We welcome contributions, including [bug reports](https://github.com/containernetworking/cni/issues), and code and documentation improvements.
+If you intend to contribute to code or documentation, please read [CONTRIBUTING.md](CONTRIBUTING.md). Also see the [contact section](#contact) in this README.
+
+## How do I use CNI?
+
+### Requirements
+
+CNI requires Go 1.5+ to build.
+
+Go 1.5 users will need to set GO15VENDOREXPERIMENT=1 to get vendored
+dependencies. This flag is set by default in 1.6.
+
+### Included Plugins
+
+This repository includes a number of common plugins in the `plugins/` directory.
+Please see the [Documentation/](Documentation/) directory for documentation about particular plugins.
+
+### Running the plugins
+
+The scripts/ directory contains two scripts, `priv-net-run.sh` and `docker-run.sh`, that can be used to exercise the plugins.
+
+**note - priv-net-run.sh depends on `jq`**
+
+Start out by creating a netconf file to describe a network:
+
+```bash
+$ mkdir -p /etc/cni/net.d
+$ cat >/etc/cni/net.d/10-mynet.conf <<EOF
+{
+ "cniVersion": "0.2.0",
+ "name": "mynet",
+ "type": "bridge",
+ "bridge": "cni0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.22.0.0/16",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ]
+ }
+}
+EOF
+$ cat >/etc/cni/net.d/99-loopback.conf <<EOF
+{
+ "cniVersion": "0.2.0",
+ "type": "loopback"
+}
+EOF
+```
+
+The directory `/etc/cni/net.d` is the default location in which the scripts will look for net configurations.
+
+Next, build the plugins:
+
+```bash
+$ ./build
+```
+
+Finally, execute a command (`ifconfig` in this example) in a private network namespace that has joined the `mynet` network:
+
+```bash
+$ CNI_PATH=`pwd`/bin
+$ cd scripts
+$ sudo CNI_PATH=$CNI_PATH ./priv-net-run.sh ifconfig
+eth0 Link encap:Ethernet HWaddr f2:c2:6f:54:b8:2b
+ inet addr:10.22.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
+ inet6 addr: fe80::f0c2:6fff:fe54:b82b/64 Scope:Link
+ UP BROADCAST MULTICAST MTU:1500 Metric:1
+ RX packets:1 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:1 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:90 (90.0 B) TX bytes:0 (0.0 B)
+
+lo Link encap:Local Loopback
+ inet addr:127.0.0.1 Mask:255.0.0.0
+ inet6 addr: ::1/128 Scope:Host
+ UP LOOPBACK RUNNING MTU:65536 Metric:1
+ RX packets:0 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
+```
+
+The environment variable `CNI_PATH` tells the scripts and library where to look for plugin executables.
+
+## Running a Docker container with network namespace set up by CNI plugins
+
+Use the instructions in the previous section to define a netconf and build the plugins.
+Next, the docker-run.sh script wraps `docker run`, executing the plugins prior to entering the container:
+
+```bash
+$ CNI_PATH=`pwd`/bin
+$ cd scripts
+$ sudo CNI_PATH=$CNI_PATH ./docker-run.sh --rm busybox:latest ifconfig
+eth0 Link encap:Ethernet HWaddr fa:60:70:aa:07:d1
+ inet addr:10.22.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
+ inet6 addr: fe80::f860:70ff:feaa:7d1/64 Scope:Link
+ UP BROADCAST MULTICAST MTU:1500 Metric:1
+ RX packets:1 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:1 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:90 (90.0 B) TX bytes:0 (0.0 B)
+
+lo Link encap:Local Loopback
+ inet addr:127.0.0.1 Mask:255.0.0.0
+ inet6 addr: ::1/128 Scope:Host
+ UP LOOPBACK RUNNING MTU:65536 Metric:1
+ RX packets:0 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
+```
+
+## What might CNI do in the future?
+
+CNI currently covers a wide range of needs for network configuration due to its simple model and API.
+However, in the future CNI might want to branch out into other directions:
+
+- Dynamic updates to existing network configuration
+- Dynamic policies for network bandwidth and firewall rules
+
+If these topics are of interest, please contact the team via the mailing list or IRC and find some like-minded people in the community to put a proposal together.
+
+## Contact
+
+For any questions about CNI, please reach out on the mailing list:
+- Email: [cni-dev](https://groups.google.com/forum/#!forum/cni-dev)
+- IRC: #[containernetworking](irc://irc.freenode.org:6667/#containernetworking) channel on freenode.org
+- Slack: [containernetworking.slack.com](https://cryptic-tundra-43194.herokuapp.com)
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
new file mode 100644
index 000000000..a23cbb2c5
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -0,0 +1,219 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+import (
+ "os"
+ "strings"
+
+ "github.com/containernetworking/cni/pkg/invoke"
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/version"
+)
+
+type RuntimeConf struct {
+ ContainerID string
+ NetNS string
+ IfName string
+ Args [][2]string
+ // A dictionary of capability-specific data passed by the runtime
+ // to plugins as top-level keys in the 'runtimeConfig' dictionary
+ // of the plugin's stdin data. libcni will ensure that only keys
+ // in this map which match the capabilities of the plugin are passed
+ // to the plugin
+ CapabilityArgs map[string]interface{}
+}
+
+type NetworkConfig struct {
+ Network *types.NetConf
+ Bytes []byte
+}
+
+type NetworkConfigList struct {
+ Name string
+ CNIVersion string
+ Plugins []*NetworkConfig
+ Bytes []byte
+}
+
+type CNI interface {
+ AddNetworkList(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ DelNetworkList(net *NetworkConfigList, rt *RuntimeConf) error
+
+ AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ DelNetwork(net *NetworkConfig, rt *RuntimeConf) error
+}
+
+type CNIConfig struct {
+ Path []string
+}
+
+// CNIConfig implements the CNI interface
+var _ CNI = &CNIConfig{}
+
+func buildOneConfig(list *NetworkConfigList, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
+ var err error
+
+ inject := map[string]interface{}{
+ "name": list.Name,
+ "cniVersion": list.CNIVersion,
+ }
+ // Add previous plugin result
+ if prevResult != nil {
+ inject["prevResult"] = prevResult
+ }
+
+ // Ensure every config uses the same name and version
+ orig, err = InjectConf(orig, inject)
+ if err != nil {
+ return nil, err
+ }
+
+ return injectRuntimeConfig(orig, rt)
+}
+
+// This function takes a libcni RuntimeConf structure and injects values into
+// a "runtimeConfig" dictionary in the CNI network configuration JSON that
+// will be passed to the plugin on stdin.
+//
+// Only "capabilities arguments" passed by the runtime are currently injected.
+// These capabilities arguments are filtered through the plugin's advertised
+// capabilities from its config JSON, and any keys in the CapabilityArgs
+// matching plugin capabilities are added to the "runtimeConfig" dictionary
+// sent to the plugin via JSON on stdin. For example, if the plugin's
+// capabilities include "portMappings", and the CapabilityArgs map includes a
+// "portMappings" key, that key and its value are added to the "runtimeConfig"
+// dictionary to be passed to the plugin's stdin.
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) {
+ var err error
+
+ rc := make(map[string]interface{})
+ for capability, supported := range orig.Network.Capabilities {
+ if !supported {
+ continue
+ }
+ if data, ok := rt.CapabilityArgs[capability]; ok {
+ rc[capability] = data
+ }
+ }
+
+ if len(rc) > 0 {
+ orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc})
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return orig, nil
+}
+
+// AddNetworkList executes a sequence of plugins with the ADD command
+func (c *CNIConfig) AddNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+ var prevResult types.Result
+ for _, net := range list.Plugins {
+ pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ newConf, err := buildOneConfig(list, net, prevResult, rt)
+ if err != nil {
+ return nil, err
+ }
+
+ prevResult, err = invoke.ExecPluginWithResult(pluginPath, newConf.Bytes, c.args("ADD", rt))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return prevResult, nil
+}
+
+// DelNetworkList executes a sequence of plugins with the DEL command
+func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) error {
+ for i := len(list.Plugins) - 1; i >= 0; i-- {
+ net := list.Plugins[i]
+
+ pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return err
+ }
+
+ newConf, err := buildOneConfig(list, net, nil, rt)
+ if err != nil {
+ return err
+ }
+
+ if err := invoke.ExecPluginWithoutResult(pluginPath, newConf.Bytes, c.args("DEL", rt)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// AddNetwork executes the plugin with the ADD command
+func (c *CNIConfig) AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+ pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ net, err = injectRuntimeConfig(net, rt)
+ if err != nil {
+ return nil, err
+ }
+
+ return invoke.ExecPluginWithResult(pluginPath, net.Bytes, c.args("ADD", rt))
+}
+
+// DelNetwork executes the plugin with the DEL command
+func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error {
+ pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return err
+ }
+
+ net, err = injectRuntimeConfig(net, rt)
+ if err != nil {
+ return err
+ }
+
+ return invoke.ExecPluginWithoutResult(pluginPath, net.Bytes, c.args("DEL", rt))
+}
+
+// GetVersionInfo reports which versions of the CNI spec are supported by
+// the given plugin.
+func (c *CNIConfig) GetVersionInfo(pluginType string) (version.PluginInfo, error) {
+ pluginPath, err := invoke.FindInPath(pluginType, c.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ return invoke.GetVersionInfo(pluginPath)
+}
+
+// =====
+func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args {
+ return &invoke.Args{
+ Command: action,
+ ContainerID: rt.ContainerID,
+ NetNS: rt.NetNS,
+ PluginArgs: rt.Args,
+ IfName: rt.IfName,
+ Path: strings.Join(c.Path, string(os.PathListSeparator)),
+ }
+}
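A minimal sketch of how a runtime might drive the API above, assuming plugins installed under /opt/cni/bin and a "mynet" .conflist in /etc/cni/net.d (all names and paths illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	// Where to look for plugin executables (illustrative path).
	cninet := &libcni.CNIConfig{Path: []string{"/opt/cni/bin"}}

	// Load a network config list named "mynet" (illustrative) from the
	// conventional configuration directory.
	list, err := libcni.LoadConfList("/etc/cni/net.d", "mynet")
	if err != nil {
		log.Fatal(err)
	}

	rt := &libcni.RuntimeConf{
		ContainerID: "example-id",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
	}

	// Runs every plugin in the list with CNI_COMMAND=ADD, threading each
	// plugin's result into the next via "prevResult".
	result, err := cninet.AddNetworkList(list, rt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result.String())

	// Tear down in reverse order with CNI_COMMAND=DEL.
	if err := cninet.DelNetworkList(list, rt); err != nil {
		log.Fatal(err)
	}
}
```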
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
new file mode 100644
index 000000000..c7738c665
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -0,0 +1,256 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+type NotFoundError struct {
+ Dir string
+ Name string
+}
+
+func (e NotFoundError) Error() string {
+ return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir)
+}
+
+type NoConfigsFoundError struct {
+ Dir string
+}
+
+func (e NoConfigsFoundError) Error() string {
+ return fmt.Sprintf(`no net configurations found in %s`, e.Dir)
+}
+
+func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
+ conf := &NetworkConfig{Bytes: bytes}
+ if err := json.Unmarshal(bytes, &conf.Network); err != nil {
+ return nil, fmt.Errorf("error parsing configuration: %s", err)
+ }
+ return conf, nil
+}
+
+func ConfFromFile(filename string) (*NetworkConfig, error) {
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("error reading %s: %s", filename, err)
+ }
+ return ConfFromBytes(bytes)
+}
+
+func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
+ rawList := make(map[string]interface{})
+ if err := json.Unmarshal(bytes, &rawList); err != nil {
+ return nil, fmt.Errorf("error parsing configuration list: %s", err)
+ }
+
+ rawName, ok := rawList["name"]
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: no name")
+ }
+ name, ok := rawName.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName)
+ }
+
+ var cniVersion string
+ rawVersion, ok := rawList["cniVersion"]
+ if ok {
+ cniVersion, ok = rawVersion.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion)
+ }
+ }
+
+ list := &NetworkConfigList{
+ Name: name,
+ CNIVersion: cniVersion,
+ Bytes: bytes,
+ }
+
+ var plugins []interface{}
+ plug, ok := rawList["plugins"]
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key")
+ }
+ plugins, ok = plug.([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug)
+ }
+ if len(plugins) == 0 {
+ return nil, fmt.Errorf("error parsing configuration list: no plugins in list")
+ }
+
+ for i, conf := range plugins {
+ newBytes, err := json.Marshal(conf)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err)
+ }
+ netConf, err := ConfFromBytes(newBytes)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err)
+ }
+ list.Plugins = append(list.Plugins, netConf)
+ }
+
+ return list, nil
+}
+
+func ConfListFromFile(filename string) (*NetworkConfigList, error) {
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("error reading %s: %s", filename, err)
+ }
+ return ConfListFromBytes(bytes)
+}
+
+func ConfFiles(dir string, extensions []string) ([]string, error) {
+ // In part, adapted from rkt/networking/podenv.go#listFiles
+ files, err := ioutil.ReadDir(dir)
+ switch {
+ case err == nil: // break
+ case os.IsNotExist(err):
+ return nil, nil
+ default:
+ return nil, err
+ }
+
+ confFiles := []string{}
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ fileExt := filepath.Ext(f.Name())
+ for _, ext := range extensions {
+ if fileExt == ext {
+ confFiles = append(confFiles, filepath.Join(dir, f.Name()))
+ }
+ }
+ }
+ return confFiles, nil
+}
+
+func LoadConf(dir, name string) (*NetworkConfig, error) {
+ files, err := ConfFiles(dir, []string{".conf", ".json"})
+ switch {
+ case err != nil:
+ return nil, err
+ case len(files) == 0:
+ return nil, NoConfigsFoundError{Dir: dir}
+ }
+ sort.Strings(files)
+
+ for _, confFile := range files {
+ conf, err := ConfFromFile(confFile)
+ if err != nil {
+ return nil, err
+ }
+ if conf.Network.Name == name {
+ return conf, nil
+ }
+ }
+ return nil, NotFoundError{dir, name}
+}
+
+func LoadConfList(dir, name string) (*NetworkConfigList, error) {
+ files, err := ConfFiles(dir, []string{".conflist"})
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(files)
+
+ for _, confFile := range files {
+ conf, err := ConfListFromFile(confFile)
+ if err != nil {
+ return nil, err
+ }
+ if conf.Name == name {
+ return conf, nil
+ }
+ }
+
+ // Try to load a network configuration file (instead of a list)
+ // from the same name, then upconvert.
+ singleConf, err := LoadConf(dir, name)
+ if err != nil {
+ // A little extra logic so the error makes sense
+ if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok {
+ // Config lists found but no config files found
+ return nil, NotFoundError{dir, name}
+ }
+
+ return nil, err
+ }
+ return ConfListFromConf(singleConf)
+}
+
+func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) {
+ config := make(map[string]interface{})
+ err := json.Unmarshal(original.Bytes, &config)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshal existing network bytes: %s", err)
+ }
+
+ for key, value := range newValues {
+ if key == "" {
+ return nil, fmt.Errorf("keys cannot be empty")
+ }
+
+ if value == nil {
+ return nil, fmt.Errorf("key '%s' value must not be nil", key)
+ }
+
+ config[key] = value
+ }
+
+ newBytes, err := json.Marshal(config)
+ if err != nil {
+ return nil, err
+ }
+
+ return ConfFromBytes(newBytes)
+}
+
+// ConfListFromConf "upconverts" a network config in to a NetworkConfigList,
+// with the single network as the only entry in the list.
+func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) {
+ // Re-deserialize the config's json, then make a raw map configlist.
+ // This may seem a bit strange, but it's to make the Bytes fields
+ // actually make sense. Otherwise, the generated json is littered with
+ // golang default values.
+
+ rawConfig := make(map[string]interface{})
+ if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil {
+ return nil, err
+ }
+
+ rawConfigList := map[string]interface{}{
+ "name": original.Network.Name,
+ "cniVersion": original.Network.CNIVersion,
+ "plugins": []interface{}{rawConfig},
+ }
+
+ b, err := json.Marshal(rawConfigList)
+ if err != nil {
+ return nil, err
+ }
+ return ConfListFromBytes(b)
+}
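InjectConf and ConfListFromConf compose naturally; a sketch, assuming an inline bridge config (the "mynet"/"cni0" values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	conf, err := libcni.ConfFromBytes([]byte(`{
		"cniVersion": "0.3.1",
		"name": "mynet",
		"type": "bridge"
	}`))
	if err != nil {
		log.Fatal(err)
	}

	// Overwrite (or add) a top-level key; the config is re-serialized so
	// conf.Bytes stays in sync with conf.Network.
	conf, err = libcni.InjectConf(conf, map[string]interface{}{"bridge": "cni0"})
	if err != nil {
		log.Fatal(err)
	}

	// Upconvert the single config into a one-element NetworkConfigList.
	list, err := libcni.ConfListFromConf(conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(list.Name, len(list.Plugins)) // mynet 1
}
```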
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
new file mode 100644
index 000000000..39b639723
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
@@ -0,0 +1,82 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "os"
+ "strings"
+)
+
+type CNIArgs interface {
+ // For use with os/exec; i.e., return nil to inherit the
+ // environment from this process
+ AsEnv() []string
+}
+
+type inherited struct{}
+
+var inheritArgsFromEnv inherited
+
+func (_ *inherited) AsEnv() []string {
+ return nil
+}
+
+func ArgsFromEnv() CNIArgs {
+ return &inheritArgsFromEnv
+}
+
+type Args struct {
+ Command string
+ ContainerID string
+ NetNS string
+ PluginArgs [][2]string
+ PluginArgsStr string
+ IfName string
+ Path string
+}
+
+// Args implements the CNIArgs interface
+var _ CNIArgs = &Args{}
+
+func (args *Args) AsEnv() []string {
+ env := os.Environ()
+ pluginArgsStr := args.PluginArgsStr
+ if pluginArgsStr == "" {
+ pluginArgsStr = stringify(args.PluginArgs)
+ }
+
+ // Ensure that the custom values are first, so any value present in
+ // the process environment won't override them.
+ env = append([]string{
+ "CNI_COMMAND=" + args.Command,
+ "CNI_CONTAINERID=" + args.ContainerID,
+ "CNI_NETNS=" + args.NetNS,
+ "CNI_ARGS=" + pluginArgsStr,
+ "CNI_IFNAME=" + args.IfName,
+ "CNI_PATH=" + args.Path,
+ }, env...)
+ return env
+}
+
+// taken from rkt/networking/net_plugin.go
+func stringify(pluginArgs [][2]string) string {
+ entries := make([]string, len(pluginArgs))
+
+ for i, kv := range pluginArgs {
+ entries[i] = strings.Join(kv[:], "=")
+ }
+
+ return strings.Join(entries, ";")
+}
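AsEnv prepends exactly six CNI_* variables ahead of the inherited environment; a sketch with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	args := &invoke.Args{
		Command:     "ADD",
		ContainerID: "example-id",
		NetNS:       "/var/run/netns/example",
		PluginArgs:  [][2]string{{"K8S_POD_NAME", "mypod"}},
		IfName:      "eth0",
		Path:        "/opt/cni/bin",
	}

	// The six CNI_* variables come first, followed by os.Environ().
	for _, kv := range args.AsEnv()[:6] {
		fmt.Println(kv)
	}
	// CNI_COMMAND=ADD
	// CNI_CONTAINERID=example-id
	// CNI_NETNS=/var/run/netns/example
	// CNI_ARGS=K8S_POD_NAME=mypod
	// CNI_IFNAME=eth0
	// CNI_PATH=/opt/cni/bin
}
```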
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
new file mode 100644
index 000000000..c78a69eeb
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
@@ -0,0 +1,53 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+func DelegateAdd(delegatePlugin string, netconf []byte) (types.Result, error) {
+ if os.Getenv("CNI_COMMAND") != "ADD" {
+ return nil, fmt.Errorf("CNI_COMMAND is not ADD")
+ }
+
+ paths := filepath.SplitList(os.Getenv("CNI_PATH"))
+
+ pluginPath, err := FindInPath(delegatePlugin, paths)
+ if err != nil {
+ return nil, err
+ }
+
+ return ExecPluginWithResult(pluginPath, netconf, ArgsFromEnv())
+}
+
+func DelegateDel(delegatePlugin string, netconf []byte) error {
+ if os.Getenv("CNI_COMMAND") != "DEL" {
+ return fmt.Errorf("CNI_COMMAND is not DEL")
+ }
+
+ paths := filepath.SplitList(os.Getenv("CNI_PATH"))
+
+ pluginPath, err := FindInPath(delegatePlugin, paths)
+ if err != nil {
+ return err
+ }
+
+ return ExecPluginWithoutResult(pluginPath, netconf, ArgsFromEnv())
+}
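DelegateAdd and DelegateDel are meant to be called from inside a plugin that is itself handling ADD or DEL. A hedged sketch, assuming a hypothetical helper that delegates IPAM to the "host-local" plugin:

```go
package plugindemo

import (
	"github.com/containernetworking/cni/pkg/invoke"
	"github.com/containernetworking/cni/pkg/types"
)

// allocateWithIPAM is a hypothetical helper a plugin's ADD handler might
// use; "host-local" is only an example delegate name. DelegateAdd expects
// CNI_COMMAND=ADD and CNI_PATH to already be set in the environment, as
// they are when the calling plugin was itself invoked by the runtime.
func allocateWithIPAM(netconf []byte) (types.Result, error) {
	return invoke.DelegateAdd("host-local", netconf)
}
```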
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
new file mode 100644
index 000000000..fc47e7c82
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
@@ -0,0 +1,95 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/version"
+)
+
+func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs) (types.Result, error) {
+ return defaultPluginExec.WithResult(pluginPath, netconf, args)
+}
+
+func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs) error {
+ return defaultPluginExec.WithoutResult(pluginPath, netconf, args)
+}
+
+func GetVersionInfo(pluginPath string) (version.PluginInfo, error) {
+ return defaultPluginExec.GetVersionInfo(pluginPath)
+}
+
+var defaultPluginExec = &PluginExec{
+ RawExec: &RawExec{Stderr: os.Stderr},
+ VersionDecoder: &version.PluginDecoder{},
+}
+
+type PluginExec struct {
+ RawExec interface {
+ ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error)
+ }
+ VersionDecoder interface {
+ Decode(jsonBytes []byte) (version.PluginInfo, error)
+ }
+}
+
+func (e *PluginExec) WithResult(pluginPath string, netconf []byte, args CNIArgs) (types.Result, error) {
+ stdoutBytes, err := e.RawExec.ExecPlugin(pluginPath, netconf, args.AsEnv())
+ if err != nil {
+ return nil, err
+ }
+
+ // Plugin must return result in same version as specified in netconf
+ versionDecoder := &version.ConfigDecoder{}
+ confVersion, err := versionDecoder.Decode(netconf)
+ if err != nil {
+ return nil, err
+ }
+
+ return version.NewResult(confVersion, stdoutBytes)
+}
+
+func (e *PluginExec) WithoutResult(pluginPath string, netconf []byte, args CNIArgs) error {
+ _, err := e.RawExec.ExecPlugin(pluginPath, netconf, args.AsEnv())
+ return err
+}
+
+// GetVersionInfo returns the version information available about the plugin.
+// For recent-enough plugins, it uses the information returned by the VERSION
+// command. For older plugins which do not recognize that command, it reports
+// version 0.1.0
+func (e *PluginExec) GetVersionInfo(pluginPath string) (version.PluginInfo, error) {
+ args := &Args{
+ Command: "VERSION",
+
+ // set fake values required by plugins built against an older version of skel
+ NetNS: "dummy",
+ IfName: "dummy",
+ Path: "dummy",
+ }
+ stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current()))
+ stdoutBytes, err := e.RawExec.ExecPlugin(pluginPath, stdin, args.AsEnv())
+ if err != nil {
+ if err.Error() == "unknown CNI_COMMAND: VERSION" {
+ return version.PluginSupports("0.1.0"), nil
+ }
+ return nil, err
+ }
+
+ return e.VersionDecoder.Decode(stdoutBytes)
+}
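Probing a plugin's supported spec versions, assuming a "bridge" plugin under /opt/cni/bin (both illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Locate the plugin binary on an assumed plugin path.
	pluginPath, err := invoke.FindInPath("bridge", []string{"/opt/cni/bin"})
	if err != nil {
		log.Fatal(err)
	}

	// Runs the plugin with CNI_COMMAND=VERSION; plugins too old to know
	// that command are reported as supporting only 0.1.0.
	info, err := invoke.GetVersionInfo(pluginPath)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.SupportedVersions())
}
```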
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go
new file mode 100644
index 000000000..e815404c8
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go
@@ -0,0 +1,43 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// FindInPath returns the full path of the plugin by searching in the provided path
+func FindInPath(plugin string, paths []string) (string, error) {
+ if plugin == "" {
+ return "", fmt.Errorf("no plugin name provided")
+ }
+
+ if len(paths) == 0 {
+ return "", fmt.Errorf("no paths provided")
+ }
+
+ for _, path := range paths {
+ for _, fe := range ExecutableFileExtensions {
+ fullpath := filepath.Join(path, plugin) + fe
+ if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
+ return fullpath, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths)
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
new file mode 100644
index 000000000..bab5737a9
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
@@ -0,0 +1,20 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package invoke
+
+// Valid file extensions for plugin executables.
+var ExecutableFileExtensions = []string{""}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go
new file mode 100644
index 000000000..7665125b1
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+// Valid file extensions for plugin executables.
+var ExecutableFileExtensions = []string{".exe", ""}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
new file mode 100644
index 000000000..93f1e75d9
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
@@ -0,0 +1,59 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os/exec"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+type RawExec struct {
+ Stderr io.Writer
+}
+
+func (e *RawExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+ stdout := &bytes.Buffer{}
+
+ c := exec.Cmd{
+ Env: environ,
+ Path: pluginPath,
+ Args: []string{pluginPath},
+ Stdin: bytes.NewBuffer(stdinData),
+ Stdout: stdout,
+ Stderr: e.Stderr,
+ }
+ if err := c.Run(); err != nil {
+ return nil, pluginErr(err, stdout.Bytes())
+ }
+
+ return stdout.Bytes(), nil
+}
+
+func pluginErr(err error, output []byte) error {
+ if _, ok := err.(*exec.ExitError); ok {
+ emsg := types.Error{}
+ if perr := json.Unmarshal(output, &emsg); perr != nil {
+ emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr)
+ }
+ return &emsg
+ }
+
+ return err
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
new file mode 100644
index 000000000..2833aba78
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
@@ -0,0 +1,135 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types020
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+const ImplementedSpecVersion string = "0.2.0"
+
+var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion}
+
+// Compatibility types for CNI version 0.1.0 and 0.2.0
+
+func NewResult(data []byte) (types.Result, error) {
+ result := &Result{}
+ if err := json.Unmarshal(data, result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func GetResult(r types.Result) (*Result, error) {
+ // We expect version 0.1.0/0.2.0 results
+ result020, err := r.GetAsVersion(ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ result, ok := result020.(*Result)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert result")
+ }
+ return result, nil
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+ IP4 *IPConfig `json:"ip4,omitempty"`
+ IP6 *IPConfig `json:"ip6,omitempty"`
+ DNS types.DNS `json:"dns,omitempty"`
+}
+
+func (r *Result) Version() string {
+ return ImplementedSpecVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+ for _, supportedVersion := range SupportedVersions {
+ if version == supportedVersion {
+ r.CNIVersion = version
+ return r, nil
+ }
+ }
+ return nil, fmt.Errorf("cannot convert version %q to %s", SupportedVersions, version)
+}
+
+func (r *Result) Print() error {
+ data, err := json.MarshalIndent(r, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = os.Stdout.Write(data)
+ return err
+}
+
+// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where
+// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the
+// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string.
+func (r *Result) String() string {
+ var str string
+ if r.IP4 != nil {
+ str = fmt.Sprintf("IP4:%+v, ", *r.IP4)
+ }
+ if r.IP6 != nil {
+ str += fmt.Sprintf("IP6:%+v, ", *r.IP6)
+ }
+ return fmt.Sprintf("%sDNS:%+v", str, r.DNS)
+}
+
+// IPConfig contains values necessary to configure an interface
+type IPConfig struct {
+ IP net.IPNet
+ Gateway net.IP
+ Routes []types.Route
+}
+
+// net.IPNet is not JSON (un)marshallable so this duality is needed
+// for our custom IPNet type
+
+// JSON (un)marshallable types
+type ipConfig struct {
+ IP types.IPNet `json:"ip"`
+ Gateway net.IP `json:"gateway,omitempty"`
+ Routes []types.Route `json:"routes,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+ ipc := ipConfig{
+ IP: types.IPNet(c.IP),
+ Gateway: c.Gateway,
+ Routes: c.Routes,
+ }
+
+ return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+ ipc := ipConfig{}
+ if err := json.Unmarshal(data, &ipc); err != nil {
+ return err
+ }
+
+ c.IP = net.IPNet(ipc.IP)
+ c.Gateway = ipc.Gateway
+ c.Routes = ipc.Routes
+ return nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go
new file mode 100644
index 000000000..bd8640fc9
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go
@@ -0,0 +1,112 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// UnmarshallableBool is a typedef of the builtin bool, needed because
+// methods cannot be declared on builtin types
+type UnmarshallableBool bool
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns boolean true if the string is "1" or "[Tt]rue"
+// Returns boolean false if the string is "0" or "[Ff]alse"
+func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
+ s := strings.ToLower(string(data))
+ switch s {
+ case "1", "true":
+ *b = true
+ case "0", "false":
+ *b = false
+ default:
+ return fmt.Errorf("Boolean unmarshal error: invalid input %s", s)
+ }
+ return nil
+}
+
+// UnmarshallableString is a typedef of the builtin string
+type UnmarshallableString string
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns the string
+func (s *UnmarshallableString) UnmarshalText(data []byte) error {
+ *s = UnmarshallableString(data)
+ return nil
+}
+
+// CommonArgs contains the IgnoreUnknown argument
+// and must be embedded by all Arg structs
+type CommonArgs struct {
+ IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"`
+}
+
+// GetKeyField is a helper function to retrieve the named field from a
+// Value that represents a pointer to a struct
+func GetKeyField(keyString string, v reflect.Value) reflect.Value {
+ return v.Elem().FieldByName(keyString)
+}
+
+// UnmarshalableArgsError is used to indicate error unmarshalling args
+// from the args-string in the form "K=V;K2=V2;..."
+type UnmarshalableArgsError struct {
+ error
+}
+
+// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
+func LoadArgs(args string, container interface{}) error {
+ if args == "" {
+ return nil
+ }
+
+ containerValue := reflect.ValueOf(container)
+
+ pairs := strings.Split(args, ";")
+ unknownArgs := []string{}
+ for _, pair := range pairs {
+ kv := strings.Split(pair, "=")
+ if len(kv) != 2 {
+ return fmt.Errorf("ARGS: invalid pair %q", pair)
+ }
+ keyString := kv[0]
+ valueString := kv[1]
+ keyField := GetKeyField(keyString, containerValue)
+ if !keyField.IsValid() {
+ unknownArgs = append(unknownArgs, pair)
+ continue
+ }
+ keyFieldIface := keyField.Addr().Interface()
+ u, ok := keyFieldIface.(encoding.TextUnmarshaler)
+ if !ok {
+ return UnmarshalableArgsError{fmt.Errorf(
+ "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler",
+ keyString, reflect.TypeOf(keyFieldIface))}
+ }
+ err := u.UnmarshalText([]byte(valueString))
+ if err != nil {
+ return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err)
+ }
+ }
+
+ isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool()
+ if len(unknownArgs) > 0 && !isIgnoreUnknown {
+ return fmt.Errorf("ARGS: unknown args %q", unknownArgs)
+ }
+ return nil
+}
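LoadArgs fills a caller-defined struct from the CNI_ARGS string; a sketch with a hypothetical PodArgs struct (field names must match the keys):

```go
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
)

// PodArgs is a hypothetical receiver struct; exported field names must
// match the keys of the "K=V;K2=V2;..." string exactly.
type PodArgs struct {
	types.CommonArgs
	K8S_POD_NAME types.UnmarshallableString
}

func main() {
	podArgs := &PodArgs{}
	if err := types.LoadArgs("IgnoreUnknown=1;K8S_POD_NAME=mypod", podArgs); err != nil {
		log.Fatal(err)
	}
	fmt.Println(podArgs.K8S_POD_NAME) // mypod
}
```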
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go
new file mode 100644
index 000000000..caac92ba7
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go
@@ -0,0 +1,300 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package current
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/types/020"
+)
+
+const ImplementedSpecVersion string = "0.3.1"
+
+var SupportedVersions = []string{"0.3.0", ImplementedSpecVersion}
+
+func NewResult(data []byte) (types.Result, error) {
+ result := &Result{}
+ if err := json.Unmarshal(data, result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func GetResult(r types.Result) (*Result, error) {
+ resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion)
+ if err != nil {
+ return nil, err
+ }
+ result, ok := resultCurrent.(*Result)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert result")
+ }
+ return result, nil
+}
+
+var resultConverters = []struct {
+ versions []string
+ convert func(types.Result) (*Result, error)
+}{
+ {types020.SupportedVersions, convertFrom020},
+ {SupportedVersions, convertFrom030},
+}
+
+func convertFrom020(result types.Result) (*Result, error) {
+ oldResult, err := types020.GetResult(result)
+ if err != nil {
+ return nil, err
+ }
+
+ newResult := &Result{
+ CNIVersion: ImplementedSpecVersion,
+ DNS: oldResult.DNS,
+ Routes: []*types.Route{},
+ }
+
+ if oldResult.IP4 != nil {
+ newResult.IPs = append(newResult.IPs, &IPConfig{
+ Version: "4",
+ Address: oldResult.IP4.IP,
+ Gateway: oldResult.IP4.Gateway,
+ })
+ for _, route := range oldResult.IP4.Routes {
+ gw := route.GW
+ if gw == nil {
+ gw = oldResult.IP4.Gateway
+ }
+ newResult.Routes = append(newResult.Routes, &types.Route{
+ Dst: route.Dst,
+ GW: gw,
+ })
+ }
+ }
+
+ if oldResult.IP6 != nil {
+ newResult.IPs = append(newResult.IPs, &IPConfig{
+ Version: "6",
+ Address: oldResult.IP6.IP,
+ Gateway: oldResult.IP6.Gateway,
+ })
+ for _, route := range oldResult.IP6.Routes {
+ gw := route.GW
+ if gw == nil {
+ gw = oldResult.IP6.Gateway
+ }
+ newResult.Routes = append(newResult.Routes, &types.Route{
+ Dst: route.Dst,
+ GW: gw,
+ })
+ }
+ }
+
+ if len(newResult.IPs) == 0 {
+ return nil, fmt.Errorf("cannot convert: no valid IP addresses")
+ }
+
+ return newResult, nil
+}
+
+func convertFrom030(result types.Result) (*Result, error) {
+ newResult, ok := result.(*Result)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert result")
+ }
+ newResult.CNIVersion = ImplementedSpecVersion
+ return newResult, nil
+}
+
+func NewResultFromResult(result types.Result) (*Result, error) {
+ version := result.Version()
+ for _, converter := range resultConverters {
+ for _, supportedVersion := range converter.versions {
+ if version == supportedVersion {
+ return converter.convert(result)
+ }
+ }
+ }
+ return nil, fmt.Errorf("unsupported CNI result22 version %q", version)
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+ Interfaces []*Interface `json:"interfaces,omitempty"`
+ IPs []*IPConfig `json:"ips,omitempty"`
+ Routes []*types.Route `json:"routes,omitempty"`
+ DNS types.DNS `json:"dns,omitempty"`
+}
+
+// Convert to the older 0.2.0 CNI spec Result type
+func (r *Result) convertTo020() (*types020.Result, error) {
+ oldResult := &types020.Result{
+ CNIVersion: types020.ImplementedSpecVersion,
+ DNS: r.DNS,
+ }
+
+ for _, ip := range r.IPs {
+ // Only convert the first IP address of each version as 0.2.0
+ // and earlier cannot handle multiple IP addresses
+ if ip.Version == "4" && oldResult.IP4 == nil {
+ oldResult.IP4 = &types020.IPConfig{
+ IP: ip.Address,
+ Gateway: ip.Gateway,
+ }
+ } else if ip.Version == "6" && oldResult.IP6 == nil {
+ oldResult.IP6 = &types020.IPConfig{
+ IP: ip.Address,
+ Gateway: ip.Gateway,
+ }
+ }
+
+ if oldResult.IP4 != nil && oldResult.IP6 != nil {
+ break
+ }
+ }
+
+ for _, route := range r.Routes {
+ is4 := route.Dst.IP.To4() != nil
+ if is4 && oldResult.IP4 != nil {
+ oldResult.IP4.Routes = append(oldResult.IP4.Routes, types.Route{
+ Dst: route.Dst,
+ GW: route.GW,
+ })
+ } else if !is4 && oldResult.IP6 != nil {
+ oldResult.IP6.Routes = append(oldResult.IP6.Routes, types.Route{
+ Dst: route.Dst,
+ GW: route.GW,
+ })
+ }
+ }
+
+ if oldResult.IP4 == nil && oldResult.IP6 == nil {
+ return nil, fmt.Errorf("cannot convert: no valid IP addresses")
+ }
+
+ return oldResult, nil
+}
+
+func (r *Result) Version() string {
+ return ImplementedSpecVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+ switch version {
+ case "0.3.0", ImplementedSpecVersion:
+ r.CNIVersion = version
+ return r, nil
+ case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]:
+ return r.convertTo020()
+ }
+ return nil, fmt.Errorf("cannot convert version 0.3.x to %q", version)
+}
+
+func (r *Result) Print() error {
+ data, err := json.MarshalIndent(r, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = os.Stdout.Write(data)
+ return err
+}
+
+// String returns a formatted string in the form of "[Interfaces: $1,][ IP: $2,][ Routes: $3,] DNS: $4"
+// where $1 represents the receiver's Interfaces, $2 its IP addresses, $3 its Routes, and $4 its DNS.
+// Any of $1, $2 and $3 that are empty are omitted from the returned string.
+func (r *Result) String() string {
+ var str string
+ if len(r.Interfaces) > 0 {
+ str += fmt.Sprintf("Interfaces:%+v, ", r.Interfaces)
+ }
+ if len(r.IPs) > 0 {
+ str += fmt.Sprintf("IP:%+v, ", r.IPs)
+ }
+ if len(r.Routes) > 0 {
+ str += fmt.Sprintf("Routes:%+v, ", r.Routes)
+ }
+ return fmt.Sprintf("%sDNS:%+v", str, r.DNS)
+}
+
+// Convert this old version result to the current CNI version result
+func (r *Result) Convert() (*Result, error) {
+ return r, nil
+}
+
+// Interface contains values about the created interfaces
+type Interface struct {
+ Name string `json:"name"`
+ Mac string `json:"mac,omitempty"`
+ Sandbox string `json:"sandbox,omitempty"`
+}
+
+func (i *Interface) String() string {
+ return fmt.Sprintf("%+v", *i)
+}
+
+// Int returns a pointer to the int value passed in. Used to
+// set the IPConfig.Interface field.
+func Int(v int) *int {
+ return &v
+}
+
+// IPConfig contains values necessary to configure an IP address on an interface
+type IPConfig struct {
+ // IP version, either "4" or "6"
+ Version string
+ // Index into Result structs Interfaces list
+ Interface *int
+ Address net.IPNet
+ Gateway net.IP
+}
+
+func (i *IPConfig) String() string {
+ return fmt.Sprintf("%+v", *i)
+}
+
+// JSON (un)marshallable types
+type ipConfig struct {
+ Version string `json:"version"`
+ Interface *int `json:"interface,omitempty"`
+ Address types.IPNet `json:"address"`
+ Gateway net.IP `json:"gateway,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+ ipc := ipConfig{
+ Version: c.Version,
+ Interface: c.Interface,
+ Address: types.IPNet(c.Address),
+ Gateway: c.Gateway,
+ }
+
+ return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+ ipc := ipConfig{}
+ if err := json.Unmarshal(data, &ipc); err != nil {
+ return err
+ }
+
+ c.Version = ipc.Version
+ c.Interface = ipc.Interface
+ c.Address = net.IPNet(ipc.Address)
+ c.Gateway = ipc.Gateway
+ return nil
+}
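Downconversion happens through GetAsVersion; a sketch, assuming an illustrative 10.1.2.3/24 address:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/containernetworking/cni/pkg/types/current"
)

func main() {
	r := &current.Result{
		CNIVersion: "0.3.1",
		IPs: []*current.IPConfig{{
			Version: "4",
			Address: net.IPNet{IP: net.IPv4(10, 1, 2, 3), Mask: net.CIDRMask(24, 32)},
		}},
	}

	// Request the legacy shape; only the first IP of each family survives.
	old, err := r.GetAsVersion("0.2.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(old.String())
}
```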
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go
new file mode 100644
index 000000000..641275600
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go
@@ -0,0 +1,189 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+)
+
+// IPNet is like net.IPNet, but adds JSON marshalling and unmarshalling
+type IPNet net.IPNet
+
+// ParseCIDR takes a string like "10.2.3.1/24" and
+// returns a *net.IPNet with IP "10.2.3.1" and a /24 mask
+func ParseCIDR(s string) (*net.IPNet, error) {
+ ip, ipn, err := net.ParseCIDR(s)
+ if err != nil {
+ return nil, err
+ }
+
+ ipn.IP = ip
+ return ipn, nil
+}
+
+func (n IPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(&n).String())
+}
+
+func (n *IPNet) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+
+ tmp, err := ParseCIDR(s)
+ if err != nil {
+ return err
+ }
+
+ *n = IPNet(*tmp)
+ return nil
+}
+
+// NetConf describes a network.
+type NetConf struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+
+ Name string `json:"name,omitempty"`
+ Type string `json:"type,omitempty"`
+ Capabilities map[string]bool `json:"capabilities,omitempty"`
+ IPAM struct {
+ Type string `json:"type,omitempty"`
+ } `json:"ipam,omitempty"`
+ DNS DNS `json:"dns"`
+}
+
+// NetConfList describes an ordered list of networks.
+type NetConfList struct {
+ CNIVersion string `json:"cniVersion,omitempty"`
+
+ Name string `json:"name,omitempty"`
+ Plugins []*NetConf `json:"plugins,omitempty"`
+}
+
+type ResultFactoryFunc func([]byte) (Result, error)
+
+// Result is an interface that provides the result of plugin execution
+type Result interface {
+ // The highest CNI specification result version the result supports
+ // without having to convert
+ Version() string
+
+ // Returns the result converted into the requested CNI specification
+ // result version, or an error if conversion failed
+ GetAsVersion(version string) (Result, error)
+
+ // Prints the result in JSON format to stdout
+ Print() error
+
+ // Returns a JSON string representation of the result
+ String() string
+}
+
+func PrintResult(result Result, version string) error {
+ newResult, err := result.GetAsVersion(version)
+ if err != nil {
+ return err
+ }
+ return newResult.Print()
+}
+
+// DNS contains values interesting for DNS resolvers
+type DNS struct {
+ Nameservers []string `json:"nameservers,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ Search []string `json:"search,omitempty"`
+ Options []string `json:"options,omitempty"`
+}
+
+type Route struct {
+ Dst net.IPNet
+ GW net.IP
+}
+
+func (r *Route) String() string {
+ return fmt.Sprintf("%+v", *r)
+}
+
+// Well known error codes
+// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes
+const (
+ ErrUnknown uint = iota // 0
+ ErrIncompatibleCNIVersion // 1
+ ErrUnsupportedField // 2
+)
+
+type Error struct {
+ Code uint `json:"code"`
+ Msg string `json:"msg"`
+ Details string `json:"details,omitempty"`
+}
+
+func (e *Error) Error() string {
+ details := ""
+ if e.Details != "" {
+ details = fmt.Sprintf("; %v", e.Details)
+ }
+ return fmt.Sprintf("%v%v", e.Msg, details)
+}
+
+func (e *Error) Print() error {
+ return prettyPrint(e)
+}
+
+// net.IPNet is not JSON (un)marshallable so this duality is needed
+// for our custom IPNet type
+
+// JSON (un)marshallable types
+type route struct {
+ Dst IPNet `json:"dst"`
+ GW net.IP `json:"gw,omitempty"`
+}
+
+func (r *Route) UnmarshalJSON(data []byte) error {
+ rt := route{}
+ if err := json.Unmarshal(data, &rt); err != nil {
+ return err
+ }
+
+ r.Dst = net.IPNet(rt.Dst)
+ r.GW = rt.GW
+ return nil
+}
+
+func (r *Route) MarshalJSON() ([]byte, error) {
+ rt := route{
+ Dst: IPNet(r.Dst),
+ GW: r.GW,
+ }
+
+ return json.Marshal(rt)
+}
+
+func prettyPrint(obj interface{}) error {
+ data, err := json.MarshalIndent(obj, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = os.Stdout.Write(data)
+ return err
+}
+
+// NotImplementedError is used to indicate that a method is not implemented for the given platform
+var NotImplementedError = errors.New("Not Implemented")
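The Route/route pair gives CIDR-style JSON; a round-trip sketch with illustrative addresses:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net"

	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	_, dst, err := net.ParseCIDR("0.0.0.0/0")
	if err != nil {
		log.Fatal(err)
	}
	route := &types.Route{Dst: *dst, GW: net.ParseIP("10.1.2.1")}

	// Marshals through the private `route` shim, so Dst serializes as a
	// CIDR string rather than as raw IP/mask byte slices.
	b, err := json.Marshal(route)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"dst":"0.0.0.0/0","gw":"10.1.2.1"}
}
```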
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go
new file mode 100644
index 000000000..3cca58bbe
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go
@@ -0,0 +1,37 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// ConfigDecoder can decode the CNI version available in network config data
+type ConfigDecoder struct{}
+
+func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) {
+ var conf struct {
+ CNIVersion string `json:"cniVersion"`
+ }
+ err := json.Unmarshal(jsonBytes, &conf)
+ if err != nil {
+ return "", fmt.Errorf("decoding version from network config: %s", err)
+ }
+ if conf.CNIVersion == "" {
+ return "0.1.0", nil
+ }
+ return conf.CNIVersion, nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
new file mode 100644
index 000000000..8a4672810
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
@@ -0,0 +1,81 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// PluginInfo reports information about CNI versioning
+type PluginInfo interface {
+ // SupportedVersions returns one or more CNI spec versions that the plugin
+ // supports. If input is provided in one of these versions, then the plugin
+ // promises to use the same CNI version in its response
+ SupportedVersions() []string
+
+ // Encode writes this CNI version information as JSON to the given Writer
+ Encode(io.Writer) error
+}
+
+type pluginInfo struct {
+ CNIVersion_ string `json:"cniVersion"`
+ SupportedVersions_ []string `json:"supportedVersions,omitempty"`
+}
+
+// pluginInfo implements the PluginInfo interface
+var _ PluginInfo = &pluginInfo{}
+
+func (p *pluginInfo) Encode(w io.Writer) error {
+ return json.NewEncoder(w).Encode(p)
+}
+
+func (p *pluginInfo) SupportedVersions() []string {
+ return p.SupportedVersions_
+}
+
+// PluginSupports returns a new PluginInfo that will report the given versions
+// as supported
+func PluginSupports(supportedVersions ...string) PluginInfo {
+ if len(supportedVersions) < 1 {
+ panic("programmer error: you must support at least one version")
+ }
+ return &pluginInfo{
+ CNIVersion_: Current(),
+ SupportedVersions_: supportedVersions,
+ }
+}
+
+// PluginDecoder can decode the response returned by a plugin's VERSION command
+type PluginDecoder struct{}
+
+func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) {
+ var info pluginInfo
+ err := json.Unmarshal(jsonBytes, &info)
+ if err != nil {
+ return nil, fmt.Errorf("decoding version info: %s", err)
+ }
+ if info.CNIVersion_ == "" {
+ return nil, fmt.Errorf("decoding version info: missing field cniVersion")
+ }
+ if len(info.SupportedVersions_) == 0 {
+ if info.CNIVersion_ == "0.2.0" {
+ return PluginSupports("0.1.0", "0.2.0"), nil
+ }
+ return nil, fmt.Errorf("decoding version info: missing field supportedVersions")
+ }
+ return &info, nil
+}
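A plugin answering the VERSION command would encode a PluginInfo to stdout; a sketch (the advertised version set is up to the plugin):

```go
package main

import (
	"log"
	"os"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// What a plugin's VERSION handler might write to stdout.
	info := version.PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1")
	if err := info.Encode(os.Stdout); err != nil {
		log.Fatal(err)
	}
	// {"cniVersion":"0.3.1","supportedVersions":["0.1.0","0.2.0","0.3.0","0.3.1"]}
}
```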
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go
new file mode 100644
index 000000000..25c3810b2
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go
@@ -0,0 +1,49 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import "fmt"
+
+type ErrorIncompatible struct {
+ Config string
+ Supported []string
+}
+
+func (e *ErrorIncompatible) Details() string {
+ return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported)
+}
+
+func (e *ErrorIncompatible) Error() string {
+ return fmt.Sprintf("incompatible CNI versions: %s", e.Details())
+}
+
+type Reconciler struct{}
+
+func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible {
+ return r.CheckRaw(configVersion, pluginInfo.SupportedVersions())
+}
+
+func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible {
+ for _, supportedVersion := range supportedVersions {
+ if configVersion == supportedVersion {
+ return nil
+ }
+ }
+
+ return &ErrorIncompatible{
+ Config: configVersion,
+ Supported: supportedVersions,
+ }
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go
new file mode 100644
index 000000000..efe8ea871
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go
@@ -0,0 +1,61 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "fmt"
+
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/types/020"
+ "github.com/containernetworking/cni/pkg/types/current"
+)
+
+// Current reports the version of the CNI spec implemented by this library
+func Current() string {
+ return "0.3.1"
+}
+
+// Legacy PluginInfo describes a plugin that is backwards compatible with the
+// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0
+// library ought to work correctly with a plugin that reports support for
+// Legacy versions.
+//
+// Any future CNI spec versions which meet this definition should be added to
+// this list.
+var Legacy = PluginSupports("0.1.0", "0.2.0")
+var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1")
+
+var resultFactories = []struct {
+ supportedVersions []string
+ newResult types.ResultFactoryFunc
+}{
+ {current.SupportedVersions, current.NewResult},
+ {types020.SupportedVersions, types020.NewResult},
+}
+
+// Finds a Result object matching the requested version (if any) and asks
+// that object to parse the plugin result, returning an error if parsing failed.
+func NewResult(version string, resultBytes []byte) (types.Result, error) {
+ reconciler := &Reconciler{}
+ for _, resultFactory := range resultFactories {
+ err := reconciler.CheckRaw(version, resultFactory.supportedVersions)
+ if err == nil {
+ // Result supports this version
+ return resultFactory.newResult(resultBytes)
+ }
+ }
+
+ return nil, fmt.Errorf("unsupported CNI result version %q", version)
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
new file mode 100644
index 000000000..c0f5cf2e8
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
@@ -0,0 +1,40 @@
+### Namespaces, Threads, and Go
+On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code.
+
+### Namespace Switching
+Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads.
+
+Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in.
+
+For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly.
+
+### Do() The Recommended Thing
+The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example:
+
+```go
+targetNs, err := ns.NewNS()
+if err != nil {
+ return err
+}
+err = targetNs.Do(func(hostNs ns.NetNS) error {
+ dummy := &netlink.Dummy{
+ LinkAttrs: netlink.LinkAttrs{
+ Name: "dummy0",
+ },
+ }
+ return netlink.LinkAdd(dummy)
+})
+```
+
+Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem.
+
+Also: If the runtime spawns a new OS thread, it will inherit the network namespace of the parent thread, which may have been temporarily switched, and thus the new OS thread will be permanently "stuck in the wrong namespace".
+
+In short, **there is no safe way to change network namespaces from within a long-lived, multithreaded Go process**. If your daemon process needs to be namespace aware, consider spawning a separate process (like a CNI plugin) for each namespace.
+
+### Further Reading
+ - https://github.com/golang/go/wiki/LockOSThread
+ - http://morsmachine.dk/go-scheduler
+ - https://github.com/containernetworking/cni/issues/262
+ - https://golang.org/pkg/runtime/
+ - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns.go
new file mode 100644
index 000000000..c212f4893
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns.go
@@ -0,0 +1,178 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ns
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "sync"
+ "syscall"
+)
+
+type NetNS interface {
+ // Executes the passed closure in this object's network namespace,
+ // attempting to restore the original namespace before returning.
+ // However, since each OS thread can have a different network namespace,
+ // and Go's thread scheduling is highly variable, callers cannot
+ // guarantee any specific namespace is set unless operations that
+ // require that namespace are wrapped with Do(). Also, no code called
+ // from Do() should call runtime.UnlockOSThread(), or the risk
+ // of executing code in an incorrect namespace will be greater. See
+ // https://github.com/golang/go/wiki/LockOSThread for further details.
+ Do(toRun func(NetNS) error) error
+
+ // Sets the current network namespace to this object's network namespace.
+ // Note that since Go's thread scheduling is highly variable, callers
+ // cannot guarantee the requested namespace will be the current namespace
+ // after this function is called; to ensure this wrap operations that
+ // require the namespace with Do() instead.
+ Set() error
+
+ // Returns the filesystem path representing this object's network namespace
+ Path() string
+
+ // Returns a file descriptor representing this object's network namespace
+ Fd() uintptr
+
+ // Cleans up this instance of the network namespace; if this instance
+ // is the last user the namespace will be destroyed
+ Close() error
+}
+
+type netNS struct {
+ file *os.File
+ mounted bool
+ closed bool
+}
+
+// netNS implements the NetNS interface
+var _ NetNS = &netNS{}
+
+const (
+ // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
+ NSFS_MAGIC = 0x6e736673
+ PROCFS_MAGIC = 0x9fa0
+)
+
+type NSPathNotExistErr struct{ msg string }
+
+func (e NSPathNotExistErr) Error() string { return e.msg }
+
+type NSPathNotNSErr struct{ msg string }
+
+func (e NSPathNotNSErr) Error() string { return e.msg }
+
+func IsNSorErr(nspath string) error {
+ stat := syscall.Statfs_t{}
+ if err := syscall.Statfs(nspath, &stat); err != nil {
+ if os.IsNotExist(err) {
+ err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
+ } else {
+ err = fmt.Errorf("failed to Statfs %q: %v", nspath, err)
+ }
+ return err
+ }
+
+ switch stat.Type {
+ case PROCFS_MAGIC, NSFS_MAGIC:
+ return nil
+ default:
+ return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
+ }
+}
+
+// Returns an object representing the namespace referred to by @path
+func GetNS(nspath string) (NetNS, error) {
+ err := IsNSorErr(nspath)
+ if err != nil {
+ return nil, err
+ }
+
+ fd, err := os.Open(nspath)
+ if err != nil {
+ return nil, err
+ }
+
+ return &netNS{file: fd}, nil
+}
+
+func (ns *netNS) Path() string {
+ return ns.file.Name()
+}
+
+func (ns *netNS) Fd() uintptr {
+ return ns.file.Fd()
+}
+
+func (ns *netNS) errorIfClosed() error {
+ if ns.closed {
+ return fmt.Errorf("%q has already been closed", ns.file.Name())
+ }
+ return nil
+}
+
+func (ns *netNS) Do(toRun func(NetNS) error) error {
+ if err := ns.errorIfClosed(); err != nil {
+ return err
+ }
+
+ containedCall := func(hostNS NetNS) error {
+ threadNS, err := GetCurrentNS()
+ if err != nil {
+ return fmt.Errorf("failed to open current netns: %v", err)
+ }
+ defer threadNS.Close()
+
+ // switch to target namespace
+ if err = ns.Set(); err != nil {
+ return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
+ }
+ defer threadNS.Set() // switch back
+
+ return toRun(hostNS)
+ }
+
+ // save a handle to current network namespace
+ hostNS, err := GetCurrentNS()
+ if err != nil {
+ return fmt.Errorf("Failed to open current namespace: %v", err)
+ }
+ defer hostNS.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ var innerError error
+ go func() {
+ defer wg.Done()
+ runtime.LockOSThread()
+ innerError = containedCall(hostNS)
+ }()
+ wg.Wait()
+
+ return innerError
+}
+
+// WithNetNSPath executes the passed closure under the given network
+// namespace, restoring the original namespace afterwards.
+func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
+ ns, err := GetNS(nspath)
+ if err != nil {
+ return err
+ }
+ defer ns.Close()
+ return ns.Do(toRun)
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
new file mode 100644
index 000000000..8949d21b5
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
@@ -0,0 +1,149 @@
+// Copyright 2015-2017 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ns
+
+import (
+ "crypto/rand"
+ "fmt"
+ "os"
+ "path"
+ "runtime"
+ "sync"
+
+ "golang.org/x/sys/unix"
+)
+
+// Returns an object representing the current OS thread's network namespace
+func GetCurrentNS() (NetNS, error) {
+ return GetNS(getCurrentThreadNetNSPath())
+}
+
+func getCurrentThreadNetNSPath() string {
+ // /proc/self/ns/net returns the namespace of the main thread, not
+ // of whatever thread this goroutine is running on. Make sure we
+ // use the current thread's net namespace, since it is the thread,
+ // not the process, that gets switched between namespaces.
+ return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
+}
+
+// Creates a new persistent network namespace and returns an object
+// representing that namespace, without switching to it
+func NewNS() (NetNS, error) {
+ const nsRunDir = "/var/run/netns"
+
+ b := make([]byte, 16)
+ _, err := rand.Reader.Read(b)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate random netns name: %v", err)
+ }
+
+ err = os.MkdirAll(nsRunDir, 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ // create an empty file at the mount point
+ nsName := fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
+ nsPath := path.Join(nsRunDir, nsName)
+ mountPointFd, err := os.Create(nsPath)
+ if err != nil {
+ return nil, err
+ }
+ mountPointFd.Close()
+
+ // Ensure the mount point is cleaned up on errors; if the namespace
+ // was successfully mounted this will have no effect because the file
+ // is in use
+ defer os.RemoveAll(nsPath)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ // do namespace work in a dedicated goroutine, so that we can safely
+ // Lock/Unlock OSThread without upsetting the lock/unlock state of
+ // the caller of this function
+ var fd *os.File
+ go (func() {
+ defer wg.Done()
+ runtime.LockOSThread()
+
+ var origNS NetNS
+ origNS, err = GetNS(getCurrentThreadNetNSPath())
+ if err != nil {
+ return
+ }
+ defer origNS.Close()
+
+ // create a new netns on the current thread
+ err = unix.Unshare(unix.CLONE_NEWNET)
+ if err != nil {
+ return
+ }
+ defer origNS.Set()
+
+ // bind mount the new netns from the current thread onto the mount point
+ err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "")
+ if err != nil {
+ return
+ }
+
+ fd, err = os.Open(nsPath)
+ if err != nil {
+ return
+ }
+ })()
+ wg.Wait()
+
+ if err != nil {
+ unix.Unmount(nsPath, unix.MNT_DETACH)
+ return nil, fmt.Errorf("failed to create namespace: %v", err)
+ }
+
+ return &netNS{file: fd, mounted: true}, nil
+}
+
+func (ns *netNS) Close() error {
+ if err := ns.errorIfClosed(); err != nil {
+ return err
+ }
+
+ if err := ns.file.Close(); err != nil {
+ return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err)
+ }
+ ns.closed = true
+
+ if ns.mounted {
+ if err := unix.Unmount(ns.file.Name(), unix.MNT_DETACH); err != nil {
+ return fmt.Errorf("Failed to unmount namespace %s: %v", ns.file.Name(), err)
+ }
+ if err := os.RemoveAll(ns.file.Name()); err != nil {
+ return fmt.Errorf("Failed to clean up namespace %s: %v", ns.file.Name(), err)
+ }
+ ns.mounted = false
+ }
+
+ return nil
+}
+
+func (ns *netNS) Set() error {
+ if err := ns.errorIfClosed(); err != nil {
+ return err
+ }
+
+ if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil {
+ return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_unspecified.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_unspecified.go
new file mode 100644
index 000000000..41b446862
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_unspecified.go
@@ -0,0 +1,36 @@
+// Copyright 2015-2017 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package ns
+
+import "github.com/containernetworking/cni/pkg/types"
+
+// Returns an object representing the current OS thread's network namespace
+func GetCurrentNS() (NetNS, error) {
+ return nil, types.NotImplementedError
+}
+
+func NewNS() (NetNS, error) {
+ return nil, types.NotImplementedError
+}
+
+func (ns *netNS) Close() error {
+ return types.NotImplementedError
+}
+
+func (ns *netNS) Set() error {
+ return types.NotImplementedError
+}
diff --git a/vendor/github.com/containers/image/LICENSE b/vendor/github.com/containers/image/LICENSE
new file mode 100644
index 000000000..953563530
--- /dev/null
+++ b/vendor/github.com/containers/image/LICENSE
@@ -0,0 +1,189 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md
new file mode 100644
index 000000000..237e9e38e
--- /dev/null
+++ b/vendor/github.com/containers/image/README.md
@@ -0,0 +1,79 @@
+[![GoDoc](https://godoc.org/github.com/containers/image?status.svg)](https://godoc.org/github.com/containers/image) [![Build Status](https://travis-ci.org/containers/image.svg?branch=master)](https://travis-ci.org/containers/image)
+=
+
+`image` is a set of Go libraries aimed at working in various ways with
+containers' images and container image registries.
+
+The containers/image library allows applications to pull and push images from
+container image registries, like the upstream docker registry. It also
+implements "simple image signing".
+
+The containers/image library also allows you to inspect a repository on a
+container registry without pulling down the image. This means it fetches the
+repository's manifest and can show you a `docker inspect`-like JSON
+output about a whole repository or a tag. This library, in contrast to `docker
+inspect`, helps you gather useful information about a repository or a tag
+without requiring you to run `docker pull`.
+
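+As an illustration, a minimal sketch against the API as vendored in this tree
+(the image name is an example, a nil `*types.SystemContext` means library
+defaults, and errors are simply propagated):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/containers/image/image"
+	"github.com/containers/image/transports/alltransports"
+)
+
+func inspectManifest(name string) error {
+	ref, err := alltransports.ParseImageName(name)
+	if err != nil {
+		return err
+	}
+	src, err := ref.NewImageSource(nil) // nil: default system context
+	if err != nil {
+		return err
+	}
+	img, err := image.FromUnparsedImage(image.UnparsedFromSource(src))
+	if err != nil {
+		src.Close()
+		return err
+	}
+	defer img.Close() // also releases the underlying image source
+	manifest, mimeType, err := img.Manifest() // fetches the manifest, not the layers
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%s: manifest type %s, %d bytes\n", name, mimeType, len(manifest))
+	return nil
+}
+
+func main() {
+	if err := inspectManifest("docker://docker.io/library/busybox:latest"); err != nil {
+		panic(err)
+	}
+}
+```
+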
+The containers/image library also allows you to translate from one image format
+to another, for example docker container images to OCI images. It also allows
+you to copy container images between various registries, possibly converting
+them as necessary, and to sign and verify images.
+
+## Command-line usage
+
+The containers/image project is only a library with no user interface;
+you can either incorporate it into your Go programs, or use the `skopeo` tool:
+
+The [skopeo](https://github.com/projectatomic/skopeo) tool uses the
+containers/image library and takes advantage of many of its features,
+e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.
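+
+As an illustration, a minimal sketch of that same entry point as vendored in
+this tree (image names are examples; the insecure accept-anything policy is
+for brevity only, and real callers should configure signature verification):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/containers/image/copy"
+	"github.com/containers/image/signature"
+	"github.com/containers/image/transports/alltransports"
+)
+
+func main() {
+	policy := &signature.Policy{Default: []signature.PolicyRequirement{
+		signature.NewPRInsecureAcceptAnything(), // demo only: accept any image
+	}}
+	policyCtx, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		panic(err)
+	}
+	defer policyCtx.Destroy()
+
+	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
+	if err != nil {
+		panic(err)
+	}
+	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox")
+	if err != nil {
+		panic(err)
+	}
+	// Note the argument order of copy.Image: destination before source.
+	if err := copy.Image(policyCtx, destRef, srcRef, &copy.Options{
+		ReportWriter: os.Stdout,
+	}); err != nil {
+		panic(err)
+	}
+}
+```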
+
+## Dependencies
+
+This library does not ship a committed version of its dependencies in a `vendor`
+subdirectory. This is so you can make well-informed decisions about which
+libraries you should use with this package in your own projects, and because
+types defined in the `vendor` directory would be impossible to use from your projects.
+
+What this project tests against dependencies-wise is located
+[in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf).
+
+## Building
+
+If you want to see what the library can do, or an example of how it is called,
+consider starting with the [skopeo](https://github.com/projectatomic/skopeo) tool
+instead.
+
+To integrate this library into your project, put it into `$GOPATH` or use
+your preferred vendoring tool to include a copy in your project.
+Ensure that the dependencies documented [in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf)
+are also available
+(using those exact versions or different versions of your choosing).
+
+This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
+```sh
+Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel
+macOS$ brew install gpgme
+```
+or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`).
+
+### Supported build tags
+
+- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
+the primary downside is that creating new signatures with the Golang-only implementation is not supported.
+- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
+and impossible to import when this build tag is in use.
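+
+For example, a consumer of this library that wants neither C dependency might
+combine both tags (a sketch; the package path is whatever imports this library):
+
+```sh
+$ go build -tags "containers_image_openpgp containers_image_ostree_stub" ./...
+```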
+
+## Contributing
+
+When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
+
+## License
+
+ASL 2.0
+
+## Contact
+
+- Mailing list: [containers-dev](https://groups.google.com/forum/?hl=en#!forum/containers-dev)
+- IRC: #[container-projects](irc://irc.freenode.net:6667/#container-projects) on freenode.net
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
new file mode 100644
index 000000000..0380bf72d
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -0,0 +1,670 @@
+package copy
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ pb "gopkg.in/cheggaaa/pb.v1"
+
+ "github.com/containers/image/image"
+ "github.com/containers/image/pkg/compression"
+ "github.com/containers/image/signature"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type digestingReader struct {
+ source io.Reader
+ digester digest.Digester
+ expectedDigest digest.Digest
+ validationFailed bool
+}
+
+// imageCopier allows us to keep track of diffID values for the blobs we're
+// copying between images, and to cache other information that might allow
+// us to take some shortcuts
+type imageCopier struct {
+ copiedBlobs map[digest.Digest]digest.Digest
+ cachedDiffIDs map[digest.Digest]digest.Digest
+ manifestUpdates *types.ManifestUpdateOptions
+ dest types.ImageDestination
+ src types.Image
+ rawSource types.ImageSource
+ diffIDsAreNeeded bool
+ canModifyManifest bool
+ reportWriter io.Writer
+ progressInterval time.Duration
+ progress chan types.ProgressProperties
+}
+
+// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
+// and set validationFailed to true if the source stream does not match expectedDigest.
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+ if err := expectedDigest.Validate(); err != nil {
+ return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
+ }
+ digestAlgorithm := expectedDigest.Algorithm()
+ if !digestAlgorithm.Available() {
+ return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+ }
+ return &digestingReader{
+ source: source,
+ digester: digestAlgorithm.Digester(),
+ expectedDigest: expectedDigest,
+ validationFailed: false,
+ }, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+ n, err := d.source.Read(p)
+ if n > 0 {
+ if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
+ // Coverage: This should not happen, the hash.Hash interface requires
+ // d.digest.Write to never return an error, and the io.Writer interface
+ // requires n2 == len(input) if no error is returned.
+ return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
+ }
+ }
+ if err == io.EOF {
+ actualDigest := d.digester.Digest()
+ if actualDigest != d.expectedDigest {
+ d.validationFailed = true
+ return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ }
+ }
+ return n, err
+}
+
+// Options allows supplying non-default configuration modifying the behavior of Image.
+type Options struct {
+ RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
+ SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ ReportWriter io.Writer
+ SourceCtx *types.SystemContext
+ DestinationCtx *types.SystemContext
+ ProgressInterval time.Duration // time to wait between reports to signal the progress channel
+ Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+}
+
+// Image copies an image from srcRef to destRef, using policyContext to validate
+// source image admissibility.
+func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
+ // NOTE this function uses an output parameter for the error return value.
+ // Setting this and returning is the ideal way to return an error.
+ //
+ // The defers in this routine will wrap the error return with their own
+ // errors, which can be valuable context in the middle of a multi-streamed copy.
+ if options == nil {
+ options = &Options{}
+ }
+
+ reportWriter := ioutil.Discard
+
+ if options.ReportWriter != nil {
+ reportWriter = options.ReportWriter
+ }
+
+ writeReport := func(f string, a ...interface{}) {
+ fmt.Fprintf(reportWriter, f, a...)
+ }
+
+ dest, err := destRef.NewImageDestination(options.DestinationCtx)
+ if err != nil {
+ return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
+ }
+ defer func() {
+ if err := dest.Close(); err != nil {
+ retErr = errors.Wrapf(retErr, " (dest: %v)", err)
+ }
+ }()
+
+ rawSource, err := srcRef.NewImageSource(options.SourceCtx)
+ if err != nil {
+ return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
+ }
+ unparsedImage := image.UnparsedFromSource(rawSource)
+ defer func() {
+ if unparsedImage != nil {
+ if err := unparsedImage.Close(); err != nil {
+ retErr = errors.Wrapf(retErr, " (unparsed: %v)", err)
+ }
+ }
+ }()
+
+ // Please keep this policy check BEFORE reading any other information about the image.
+ if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+ return errors.Wrap(err, "Source image rejected")
+ }
+ src, err := image.FromUnparsedImage(unparsedImage)
+ if err != nil {
+ return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
+ }
+ unparsedImage = nil
+ defer func() {
+ if err := src.Close(); err != nil {
+ retErr = errors.Wrapf(retErr, " (source: %v)", err)
+ }
+ }()
+
+ if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil {
+ return err
+ }
+
+ if src.IsMultiImage() {
+ return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
+ }
+
+ var sigs [][]byte
+ if options.RemoveSignatures {
+ sigs = [][]byte{}
+ } else {
+ writeReport("Getting image source signatures\n")
+ s, err := src.Signatures(context.TODO())
+ if err != nil {
+ return errors.Wrap(err, "Error reading signatures")
+ }
+ sigs = s
+ }
+ if len(sigs) != 0 {
+ writeReport("Checking if image destination supports signatures\n")
+ if err := dest.SupportsSignatures(); err != nil {
+ return errors.Wrap(err, "Can not copy signatures")
+ }
+ }
+
+ canModifyManifest := len(sigs) == 0
+ manifestUpdates := types.ManifestUpdateOptions{}
+ manifestUpdates.InformationOnly.Destination = dest
+
+ if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil {
+ return err
+ }
+
+ // We compute preferredManifestMIMEType only to show it in error messages.
+ // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
+ preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, dest.SupportedManifestMIMETypes(), canModifyManifest)
+ if err != nil {
+ return err
+ }
+
+ // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here.
+ ic := imageCopier{
+ copiedBlobs: make(map[digest.Digest]digest.Digest),
+ cachedDiffIDs: make(map[digest.Digest]digest.Digest),
+ manifestUpdates: &manifestUpdates,
+ dest: dest,
+ src: src,
+ rawSource: rawSource,
+ diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates),
+ canModifyManifest: canModifyManifest,
+ reportWriter: reportWriter,
+ progressInterval: options.ProgressInterval,
+ progress: options.Progress,
+ }
+
+ if err := ic.copyLayers(); err != nil {
+ return err
+ }
+
+ // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
+ // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
+ // without actually trying to upload something and getting a types.ManifestTypeRejectedError.
+ // So, try the preferred manifest MIME type. If the process succeeds, fine…
+ manifest, err := ic.copyUpdatedConfigAndManifest()
+ if err != nil {
+ logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
+ // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
+ if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 {
+ // We don’t have other options.
+ // In principle the code below would handle this as well, but the resulting error message is fairly ugly.
+ // Don’t bother the user with MIME types if we have no choice.
+ return err
+ }
+ // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
+ // So if we are here, we will definitely be trying to convert the manifest.
+ // With !canModifyManifest, that would just be a string of repeated failures for the same reason,
+ // so let’s bail out early and with a better error message.
+ if !canModifyManifest {
+ return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
+ }
+
+ // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
+ errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
+ for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
+ logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
+ manifestUpdates.ManifestMIMEType = manifestMIMEType
+ attemptedManifest, err := ic.copyUpdatedConfigAndManifest()
+ if err != nil {
+ logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
+ errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
+ continue
+ }
+
+ // We have successfully uploaded a manifest.
+ manifest = attemptedManifest
+ errs = nil // Mark this as a success so that we don't abort below.
+ break
+ }
+ if errs != nil {
+ return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
+ }
+ }
+
+ if options.SignBy != "" {
+ newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter)
+ if err != nil {
+ return err
+ }
+ sigs = append(sigs, newSig)
+ }
+
+ writeReport("Storing signatures\n")
+ if err := dest.PutSignatures(sigs); err != nil {
+ return errors.Wrap(err, "Error writing signatures")
+ }
+
+ if err := dest.Commit(); err != nil {
+ return errors.Wrap(err, "Error committing the finished image")
+ }
+
+ return nil
+}
+
+func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error {
+ if dest.MustMatchRuntimeOS() {
+ c, err := src.OCIConfig()
+ if err != nil {
+ return errors.Wrapf(err, "Error parsing image configuration")
+ }
+ osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS)
+ if runtime.GOOS == "windows" && c.OS == "linux" {
+ return osErr
+ } else if runtime.GOOS != "windows" && c.OS == "windows" {
+ return osErr
+ }
+ }
+ return nil
+}
+
+// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
+func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error {
+ destRef := dest.Reference().DockerReference()
+ if destRef == nil {
+ return nil // Destination does not care about Docker references
+ }
+ if !src.EmbeddedDockerReferenceConflicts(destRef) {
+ return nil // No reference embedded in the manifest, or it matches destRef already.
+ }
+
+ if !canModifyManifest {
+ return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway",
+ transports.ImageName(dest.Reference()), destRef.String())
+ }
+ manifestUpdates.EmbeddedDockerReference = destRef
+ return nil
+}
+
+// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
+func (ic *imageCopier) copyLayers() error {
+ srcInfos := ic.src.LayerInfos()
+ destInfos := []types.BlobInfo{}
+ diffIDs := []digest.Digest{}
+ updatedSrcInfos := ic.src.UpdatedLayerInfos()
+ srcInfosUpdated := false
+ if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
+ if !ic.canModifyManifest {
+ return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
+ }
+ srcInfos = updatedSrcInfos
+ srcInfosUpdated = true
+ }
+ for _, srcLayer := range srcInfos {
+ var (
+ destInfo types.BlobInfo
+ diffID digest.Digest
+ err error
+ )
+ if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+ // DiffIDs are, currently, needed only when converting from schema1,
+ // in which case src.LayerInfos will not have URLs because schema1
+ // does not support them.
+ if ic.diffIDsAreNeeded {
+ return errors.New("getting DiffID for foreign layers is unimplemented")
+ }
+ destInfo = srcLayer
+ fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name())
+ } else {
+ destInfo, diffID, err = ic.copyLayer(srcLayer)
+ if err != nil {
+ return err
+ }
+ }
+ destInfos = append(destInfos, destInfo)
+ diffIDs = append(diffIDs, diffID)
+ }
+ ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
+ if ic.diffIDsAreNeeded {
+ ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
+ }
+ if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+ ic.manifestUpdates.LayerInfos = destInfos
+ }
+ return nil
+}
+
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possibly other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+ if len(a) != len(b) {
+ return true
+ }
+ for i := range a {
+ if a[i].Digest != b[i].Digest {
+ return true
+ }
+ }
+ return false
+}
+
+// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
+// stores the resulting config and manifest to the destination, and returns the stored manifest.
+func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) {
+ pendingImage := ic.src
+ if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
+ if !ic.canModifyManifest {
+ return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+ }
+ if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
+ // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
+ // So, this can only happen if we are trying to upload using one of the other MIME type candidates.
+ // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
+ // when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
+ // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
+ // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
+ return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+ }
+ pi, err := ic.src.UpdatedImage(*ic.manifestUpdates)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error creating an updated image manifest")
+ }
+ pendingImage = pi
+ }
+ manifest, _, err := pendingImage.Manifest()
+ if err != nil {
+ return nil, errors.Wrap(err, "Error reading manifest")
+ }
+
+ if err := ic.copyConfig(pendingImage); err != nil {
+ return nil, err
+ }
+
+ fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n")
+ if err := ic.dest.PutManifest(manifest); err != nil {
+ return nil, errors.Wrap(err, "Error writing manifest")
+ }
+ return manifest, nil
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (ic *imageCopier) copyConfig(src types.Image) error {
+ srcInfo := src.ConfigInfo()
+ if srcInfo.Digest != "" {
+ fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
+ configBlob, err := src.ConfigBlob()
+ if err != nil {
+ return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
+ }
+ destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
+ if err != nil {
+ return err
+ }
+ if destInfo.Digest != srcInfo.Digest {
+ return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+ }
+ }
+ return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+ digest digest.Digest
+ err error
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
+func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
+ // Check if we already have a blob with this digest
+ haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
+ }
+ // If we already have a cached diffID for this blob, we don't need to compute it
+ diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
+ // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
+ if haveBlob && !diffIDIsNeeded {
+ // Check the blob sizes match, if we were given a size this time
+ if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
+ return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
+ }
+ srcInfo.Size = extantBlobSize
+ // Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
+ blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+ }
+ fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+ return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
+ }
+
+ // Fallback: copy the layer, computing the diffID if we need to do so
+ fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
+ srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
+ }
+ defer srcStream.Close()
+
+ blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
+ diffIDIsNeeded)
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
+ var diffIDResult diffIDResult // = {digest:""}
+ if diffIDIsNeeded {
+ diffIDResult = <-diffIDChan
+ if diffIDResult.err != nil {
+ return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
+ }
+ logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+ ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+ }
+ return blobInfo, diffIDResult.digest, nil
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
+// perhaps compressing the stream if canCompress,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
+ diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
+ var diffIDChan chan diffIDResult
+
+ err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+ if diffIDIsNeeded {
+ diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+ pipeReader, pipeWriter := io.Pipe()
+ defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+ pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+ }()
+
+ getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
+ // If this fails (e.g. because we have exited, and further reading from the
+ // pipe has failed due to pipeWriter.CloseWithError() above), we don’t really care.
+ // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+ // the return value includes an error indication, which we do check.
+ //
+ // If this is never called, pipeReader will not be used anywhere, but pipeWriter will only be
+ // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+ go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+ return pipeWriter
+ }
+ }
+ blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
+ return blobInfo, diffIDChan, err
+ // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest and status, if any, to dest.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
+ result := diffIDResult{
+ digest: "",
+ err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+ }
+ defer func() { dest <- result }()
+ defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+ result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
+
+// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
+ if decompressor != nil {
+ s, err := decompressor(stream)
+ if err != nil {
+ return "", err
+ }
+ stream = s
+ }
+
+ return digest.Canonical.FromReader(stream)
+}
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps compressing it if canCompress,
+// and returns a complete blobInfo of the copied blob.
+func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
+ getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
+ canCompress bool) (types.BlobInfo, error) {
+ // The copying happens through a pipeline of connected io.Readers.
+ // === Input: srcStream
+
+ // === Process input through digestingReader to validate against the expected digest.
+ // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+ // use a separate validation failure indicator.
+ // Note that we don't use a stronger "validationSucceeded" indicator, because
+ // dest.PutBlob may detect that the layer already exists, in which case we don't
+ // read the stream to the end, and validation does not happen.
+ digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
+ }
+ var destStream io.Reader = digestingReader
+
+ // === Detect compression of the input stream.
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
+ }
+ isCompressed := decompressor != nil
+
+ // === Report progress using a pb.Reader.
+ bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
+ bar.Output = ic.reportWriter
+ bar.SetMaxWidth(80)
+ bar.ShowTimeLeft = false
+ bar.ShowPercent = false
+ bar.Start()
+ destStream = bar.NewProxyReader(destStream)
+ defer bar.Finish()
+
+ // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
+ var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
+ if getOriginalLayerCopyWriter != nil {
+ destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
+ originalLayerReader = destStream
+ }
+
+ // === Compress the layer if it is uncompressed and compression is desired
+ var inputInfo types.BlobInfo
+ if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() {
+ logrus.Debugf("Using original blob without modification")
+ inputInfo = srcInfo
+ } else {
+ logrus.Debugf("Compressing blob on the fly")
+ pipeReader, pipeWriter := io.Pipe()
+ defer pipeReader.Close()
+
+ // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+ // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+ // we don’t care.
+ go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
+ destStream = pipeReader
+ inputInfo.Digest = ""
+ inputInfo.Size = -1
+ }
+
+ // === Report progress using the ic.progress channel, if required.
+ if ic.progress != nil && ic.progressInterval > 0 {
+ destStream = &progressReader{
+ source: destStream,
+ channel: ic.progress,
+ interval: ic.progressInterval,
+ artifact: srcInfo,
+ lastTime: time.Now(),
+ }
+ }
+
+ // === Finally, send the layer stream to dest.
+ uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
+ }
+
+ // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+ // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+ // So, read everything from originalLayerReader, which will cause the rest to be
+ // sent there if we are not already at EOF.
+ if getOriginalLayerCopyWriter != nil {
+ logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+ _, err := io.Copy(ioutil.Discard, originalLayerReader)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
+ }
+ }
+
+ if digestingReader.validationFailed { // Coverage: This should never happen.
+ return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+ }
+ if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+ return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+ }
+ return uploadedInfo, nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
+ err := errors.New("Internal error: unexpected panic in compressGoroutine")
+ defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+ dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+ }()
+
+ zipper := gzip.NewWriter(dest)
+ defer zipper.Close()
+
+ _, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
+}
diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go
new file mode 100644
index 000000000..e3b294dd1
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/manifest.go
@@ -0,0 +1,102 @@
+package copy
+
+import (
+ "strings"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
+// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location.
+// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
+var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+
+// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once.
+type orderedSet struct {
+ list []string
+ included map[string]struct{}
+}
+
+// newOrderedSet creates a correctly initialized orderedSet.
+// [Sometimes it would be really nice if Golang had constructors…]
+func newOrderedSet() *orderedSet {
+ return &orderedSet{
+ list: []string{},
+ included: map[string]struct{}{},
+ }
+}
+
+// append adds s to the end of os, only if it is not included already.
+func (os *orderedSet) append(s string) {
+ if _, ok := os.included[s]; !ok {
+ os.list = append(os.list, s)
+ os.included[s] = struct{}{}
+ }
+}
+
+// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest.
+// Note that the conversion will only happen later, through src.UpdatedImage
+// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
+// and a list of other possible alternatives, in order.
+func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) {
+ _, srcType, err := src.Manifest()
+ if err != nil { // This should have been cached?!
+ return "", nil, errors.Wrap(err, "Error reading manifest")
+ }
+
+ if len(destSupportedManifestMIMETypes) == 0 {
+ return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
+ }
+ supportedByDest := map[string]struct{}{}
+ for _, t := range destSupportedManifestMIMETypes {
+ supportedByDest[t] = struct{}{}
+ }
+
+ // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
+ // So, build a list of types to try in order of decreasing preference.
+ // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct,
+ // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other.
+ // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
+ // and never attempt the other one.
+ prioritizedTypes := newOrderedSet()
+
+ // First of all, prefer to keep the original manifest unmodified.
+ if _, ok := supportedByDest[srcType]; ok {
+ prioritizedTypes.append(srcType)
+ }
+ if !canModifyManifest {
+ // We could also drop the !canModifyManifest parameter and have the caller
+ // make the choice; it is already doing that to an extent, to improve error
+ // messages. But it is nice to hide the “if !canModifyManifest, do no conversion”
+ // special case in here; the caller can then worry (or not) only about a good UI.
+ logrus.Debugf("We can't modify the manifest, hoping for the best...")
+ return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
+ }
+
+ // Then use our list of preferred types.
+ for _, t := range preferredManifestMIMETypes {
+ if _, ok := supportedByDest[t]; ok {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ // Finally, try anything else the destination supports.
+ for _, t := range destSupportedManifestMIMETypes {
+ prioritizedTypes.append(t)
+ }
+
+ logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
+ if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
+ return "", nil, errors.New("Internal error: no candidate MIME types")
+ }
+ preferredType := prioritizedTypes.list[0]
+ if preferredType != srcType {
+ manifestUpdates.ManifestMIMEType = preferredType
+ } else {
+ logrus.Debugf("... will first try using the original manifest unmodified")
+ }
+ return preferredType, prioritizedTypes.list[1:], nil
+}
diff --git a/vendor/github.com/containers/image/copy/progress_reader.go b/vendor/github.com/containers/image/copy/progress_reader.go
new file mode 100644
index 000000000..b670ee59f
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/progress_reader.go
@@ -0,0 +1,28 @@
+package copy
+
+import (
+ "io"
+ "time"
+
+ "github.com/containers/image/types"
+)
+
+// progressReader is a reader that reports its progress on an interval.
+type progressReader struct {
+ source io.Reader
+ channel chan types.ProgressProperties
+ interval time.Duration
+ artifact types.BlobInfo
+ lastTime time.Time
+ offset uint64
+}
+
+func (r *progressReader) Read(p []byte) (int, error) {
+ n, err := r.source.Read(p)
+ r.offset += uint64(n)
+ if time.Since(r.lastTime) > r.interval {
+ r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset}
+ r.lastTime = time.Now()
+ }
+ return n, err
+}
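A hedged usage sketch for progressReader, written as if inside this copy package; the consumer goroutine and the placeholder digest are assumptions, not code from this commit:

    package copy

    import (
        "fmt"
        "io/ioutil"
        "strings"
        "time"

        "github.com/containers/image/types"
    )

    // reportProgress is a hypothetical helper showing one way to wire up
    // progressReader: a consumer goroutine prints each event from the channel.
    func reportProgress() {
        ch := make(chan types.ProgressProperties)
        go func() {
            for p := range ch {
                fmt.Printf("blob %s: %d bytes so far\n", p.Artifact.Digest, p.Offset)
            }
        }()

        r := &progressReader{
            source:   strings.NewReader("example blob contents"),
            channel:  ch,
            interval: 10 * time.Millisecond,
            artifact: types.BlobInfo{Digest: "sha256:example"}, // placeholder digest
            lastTime: time.Now(),
        }
        ioutil.ReadAll(r) // drive the reader; events fire at most once per interval
        close(ch)
    }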
diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go
new file mode 100644
index 000000000..9187d70b3
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/sign.go
@@ -0,0 +1,35 @@
+package copy
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/containers/image/signature"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity.
+func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) {
+ mech, err := signature.NewGPGSigningMechanism()
+ if err != nil {
+ return nil, errors.Wrap(err, "Error initializing GPG")
+ }
+ defer mech.Close()
+ if err := mech.SupportsSigning(); err != nil {
+ return nil, errors.Wrap(err, "Signing not supported")
+ }
+
+ dockerReference := dest.Reference().DockerReference()
+ if dockerReference == nil {
+ return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
+ }
+
+ fmt.Fprintf(reportWriter, "Signing manifest\n")
+ newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error creating signature")
+ }
+ return newSig, nil
+}
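A sketch of a plausible call site for createSignature inside this package; the wrapper name and the key fingerprint parameter are hypothetical:

    package copy

    import (
        "os"

        "github.com/containers/image/types"
    )

    // signManifest is a hypothetical wrapper around createSignature: it signs
    // manifestBytes for dest with the given GPG key fingerprint, reporting to stderr.
    // Note createSignature fails unless dest.Reference().DockerReference() is
    // non-nil, so this only works for transports carrying a Docker-style name.
    func signManifest(dest types.ImageDestination, manifestBytes []byte, keyFingerprint string) ([]byte, error) {
        return createSignature(dest, manifestBytes, keyFingerprint, os.Stderr)
    }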
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
new file mode 100644
index 000000000..ea46a27ed
--- /dev/null
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -0,0 +1,149 @@
+package directory
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type dirImageDestination struct {
+ ref dirReference
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing directory.
+func newImageDestination(ref dirReference) types.ImageDestination {
+ return &dirImageDestination{ref}
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dirImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dirImageDestination) Close() error {
+ return nil
+}
+
+func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
+ return nil
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dirImageDestination) SupportsSignatures() error {
+ return nil
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dirImageDestination) ShouldCompressLayers() bool {
+ return false
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
+ return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *dirImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ succeeded := false
+ defer func() {
+ blobFile.Close()
+ if !succeeded {
+ os.Remove(blobFile.Name())
+ }
+ }()
+
+ digester := digest.Canonical.Digester()
+ tee := io.TeeReader(stream, digester.Hash())
+
+ size, err := io.Copy(blobFile, tee)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ computedDigest := digester.Digest()
+ if inputInfo.Size != -1 && size != inputInfo.Size {
+ return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ }
+ if err := blobFile.Sync(); err != nil {
+ return types.BlobInfo{}, err
+ }
+ if err := blobFile.Chmod(0644); err != nil {
+ return types.BlobInfo{}, err
+ }
+ blobPath := d.ref.layerPath(computedDigest)
+ if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+ return types.BlobInfo{}, err
+ }
+ succeeded = true
+ return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ if info.Digest == "" {
+ return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ }
+ blobPath := d.ref.layerPath(info.Digest)
+ finfo, err := os.Stat(blobPath)
+ if err != nil && os.IsNotExist(err) {
+ return false, -1, nil
+ }
+ if err != nil {
+ return false, -1, err
+ }
+ return true, finfo.Size(), nil
+}
+
+func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return info, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(manifest []byte) error {
+ return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
+}
+
+func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
+ for i, sig := range signatures {
+ if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dirImageDestination) Commit() error {
+ return nil
+}
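A usage sketch of the directory destination via the public API (directory.NewReference is defined in directory_transport.go below); the path and manifest bytes are placeholders, and the target directory must already exist:

    package main

    import (
        "bytes"
        "log"

        "github.com/containers/image/directory"
        "github.com/containers/image/types"
    )

    func main() {
        ref, err := directory.NewReference("/tmp/example-image") // placeholder; directory must exist
        if err != nil {
            log.Fatal(err)
        }
        dest, err := ref.NewImageDestination(nil)
        if err != nil {
            log.Fatal(err)
        }
        defer dest.Close()

        // PutBlob computes the digest on the fly; Size: -1 means "unknown, skip the size check".
        info, err := dest.PutBlob(bytes.NewReader([]byte("layer data")), types.BlobInfo{Size: -1})
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("stored blob %s (%d bytes)", info.Digest, info.Size)

        if err := dest.PutManifest([]byte(`{"schemaVersion": 2}`)); err != nil { // placeholder manifest
            log.Fatal(err)
        }
        if err := dest.Commit(); err != nil { // a no-op for dir:, but part of the interface contract
            log.Fatal(err)
        }
    }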
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
new file mode 100644
index 000000000..705e289b4
--- /dev/null
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -0,0 +1,81 @@
+package directory
+
+import (
+ "context"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type dirImageSource struct {
+ ref dirReference
+}
+
+// newImageSource returns an ImageSource reading from an existing directory.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ref dirReference) types.ImageSource {
+ return &dirImageSource{ref}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dirImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dirImageSource) Close() error {
+ return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *dirImageSource) GetManifest() ([]byte, string, error) {
+ m, err := ioutil.ReadFile(s.ref.manifestPath())
+ if err != nil {
+ return nil, "", err
+ }
+ return m, manifest.GuessMIMEType(m), err
+}
+
+func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ r, err := os.Open(s.ref.layerPath(info.Digest))
+ if err != nil {
+ return nil, 0, err
+ }
+ fi, err := r.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ return r, fi.Size(), nil
+}
+
+func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+ signatures := [][]byte{}
+ for i := 0; ; i++ {
+ signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
+ if err != nil {
+ if os.IsNotExist(err) {
+ break
+ }
+ return nil, err
+ }
+ signatures = append(signatures, signature)
+ }
+ return signatures, nil
+}
+
+// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *dirImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ return nil
+}
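The matching read side, as a sketch; it assumes the directory was populated by a previous copy, so manifest.json and any signature-N files exist:

    package main

    import (
        "context"
        "log"

        "github.com/containers/image/directory"
    )

    func main() {
        ref, err := directory.NewReference("/tmp/example-image") // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        src, err := ref.NewImageSource(nil)
        if err != nil {
            log.Fatal(err)
        }
        defer src.Close()

        manifest, mimeType, err := src.GetManifest()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("manifest: %d bytes, guessed MIME type %q", len(manifest), mimeType)

        sigs, err := src.GetSignatures(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("found %d signature(s)", len(sigs)) // reads signature-1, signature-2, ... until one is missing
    }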
diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go
new file mode 100644
index 000000000..b9ce01a2e
--- /dev/null
+++ b/vendor/github.com/containers/image/directory/directory_transport.go
@@ -0,0 +1,177 @@
+package directory
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/directory/explicitfilepath"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for directory paths.
+var Transport = dirTransport{}
+
+type dirTransport struct{}
+
+func (t dirTransport) Name() string {
+ return "dir"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ }
+ return nil
+}
+
+// dirReference is an ImageReference for directory paths.
+type dirReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ path string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+}
+
+// There is no directory.ParseReference because it is rather pointless.
+// Callers who need a transport-independent interface will go through
+// dirTransport.ParseReference; callers who intentionally deal with directories
+// can use directory.NewReference.
+
+// NewReference returns a directory reference for a specified path.
+//
+// We do not expose an API supplying the resolvedPath; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedPath.
+func NewReference(path string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path)
+ if err != nil {
+ return nil, err
+ }
+ return dirReference{path: path, resolvedPath: resolved}, nil
+}
+
+func (ref dirReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref dirReference) StringWithinTransport() string {
+ return ref.path
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref dirReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref dirReference) PolicyConfigurationIdentity() string {
+ return ref.resolvedPath
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref dirReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedPath
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 || lastSlash == 0 {
+ break
+ }
+ path = path[:lastSlash]
+ res = append(res, path)
+ }
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by dirTransport.ValidatePolicyConfigurationScope above.
+ return res
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ src := newImageSource(ref)
+ return image.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ref), nil
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ref), nil
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dirReference) DeleteImage(ctx *types.SystemContext) error {
+ return errors.Errorf("Deleting images not implemented for dir: images")
+}
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath() string {
+ return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+ // FIXME: Should we keep the digest identification?
+ return filepath.Join(ref.path, digest.Hex()+".tar")
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int) string {
+ return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
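A small self-contained sketch of the namespace walk in PolicyConfigurationNamespaces: every ancestor directory except "/" becomes a progressively more general policy namespace.

    package main

    import (
        "fmt"
        "strings"
    )

    // namespaces mirrors the loop in PolicyConfigurationNamespaces above.
    func namespaces(resolvedPath string) []string {
        res := []string{}
        path := resolvedPath
        for {
            lastSlash := strings.LastIndex(path, "/")
            if lastSlash == -1 || lastSlash == 0 {
                break
            }
            path = path[:lastSlash]
            res = append(res, path)
        }
        return res
    }

    func main() {
        fmt.Println(namespaces("/var/lib/images/app"))
        // Output: [/var/lib/images /var/lib /var]
    }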
diff --git a/vendor/github.com/containers/image/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/directory/explicitfilepath/path.go
new file mode 100644
index 000000000..71136b880
--- /dev/null
+++ b/vendor/github.com/containers/image/directory/explicitfilepath/path.go
@@ -0,0 +1,56 @@
+package explicitfilepath
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name)
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+ switch _, err := os.Lstat(path); {
+ case err == nil:
+ return resolveExistingPathToFullyExplicit(path)
+ case os.IsNotExist(err):
+ parent, file := filepath.Split(path)
+ resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+ if err != nil {
+ return "", err
+ }
+ if file == "." || file == ".." {
+ // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
+ // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
+ // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
+ // in the resulting path, and especially not at the end.
+ return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
+ }
+ resolvedPath := filepath.Join(resolvedParent, file)
+ // As a sanity check, ensure that there are no "." or ".." components.
+ cleanedResolvedPath := filepath.Clean(resolvedPath)
+ if cleanedResolvedPath != resolvedPath {
+ // Coverage: This should never happen.
+ return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+ }
+ return resolvedPath, nil
+ default: // err != nil, unrecognized
+ return "", err
+ }
+}
+
+// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit,
+// but without the special case for missing final component.
+func resolveExistingPathToFullyExplicit(path string) (string, error) {
+ resolved, err := filepath.Abs(path)
+ if err != nil {
+ return "", err // Coverage: This can fail only if os.Getwd() fails.
+ }
+ resolved, err = filepath.EvalSymlinks(resolved)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Clean(resolved), nil
+}
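A usage sketch; the path is a placeholder, and only the parent directory needs to exist, per the special case documented above:

    package main

    import (
        "fmt"
        "log"

        "github.com/containers/image/directory/explicitfilepath"
    )

    func main() {
        // The parent must exist; the final component may be missing.
        // Assuming /tmp exists, this resolves even if /tmp/not-yet-created does not.
        p, err := explicitfilepath.ResolvePathToFullyExplicit("/tmp/not-yet-created")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(p) // absolute and symlink-free; on systems where /tmp is itself a symlink, it is resolved too
    }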
diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/docker/archive/dest.go
new file mode 100644
index 000000000..9fc85bd85
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/archive/dest.go
@@ -0,0 +1,66 @@
+package archive
+
+import (
+ "io"
+ "os"
+
+ "github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+type archiveImageDestination struct {
+ *tarfile.Destination // Implements most of types.ImageDestination
+ ref archiveReference
+ writer io.Closer
+}
+
+func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
+ if ref.destinationRef == nil {
+ return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)")
+ }
+
+ // ref.path can be either a pipe or a regular file
+ // in the case of a pipe, we require that we can open it for write
+ // in the case of a regular file, we don't want to overwrite any pre-existing file
+ // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
+ // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
+ fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening file %q", ref.path)
+ }
+
+ fhStat, err := fh.Stat()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error statting file %q", ref.path)
+ }
+
+ if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
+ return nil, errors.New("docker-archive doesn't support modifying existing images")
+ }
+
+ return &archiveImageDestination{
+ Destination: tarfile.NewDestination(fh, ref.destinationRef),
+ ref: ref,
+ writer: fh,
+ }, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *archiveImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *archiveImageDestination) Close() error {
+ return d.writer.Close()
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) Commit() error {
+ return d.Destination.Commit()
+}
diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go
new file mode 100644
index 000000000..b9941dfc9
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/archive/src.go
@@ -0,0 +1,41 @@
+package archive
+
+import (
+ "github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/types"
+ "github.com/sirupsen/logrus"
+)
+
+type archiveImageSource struct {
+ *tarfile.Source // Implements most of types.ImageSource
+ ref archiveReference
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource {
+ if ref.destinationRef != nil {
+ logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
+ }
+ src := tarfile.NewSource(ref.path)
+ return &archiveImageSource{
+ Source: src,
+ ref: ref,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *archiveImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *archiveImageSource) Close() error {
+ return nil
+}
+
+// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *archiveImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go
new file mode 100644
index 000000000..f38d4aced
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/archive/transport.go
@@ -0,0 +1,153 @@
+package archive
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ ctrImage "github.com/containers/image/image"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+ return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // See the explanation in archiveReference.PolicyConfigurationIdentity.
+ return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+ destinationRef reference.NamedTagged // only used for destinations
+ path string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if refString == "" {
+ return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+ }
+
+ parts := strings.SplitN(refString, ":", 2)
+ path := parts[0]
+ var destinationRef reference.NamedTagged
+
+ // A :tag was specified, which is only necessary for destinations.
+ if len(parts) == 2 {
+ ref, err := reference.ParseNormalizedNamed(parts[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ }
+ ref = reference.TagNameOnly(ref)
+
+ if _, isDigest := ref.(reference.Canonical); isDigest {
+ return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
+ }
+
+ refTagged, isTagged := ref.(reference.NamedTagged)
+ if !isTagged {
+ // Really shouldn't be hit...
+ return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
+ }
+ destinationRef = refTagged
+ }
+
+ return archiveReference{
+ destinationRef: destinationRef,
+ path: path,
+ }, nil
+}
+
+func (ref archiveReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref archiveReference) StringWithinTransport() string {
+ if ref.destinationRef == nil {
+ return ref.path
+ }
+ return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref archiveReference) DockerReference() reference.Named {
+ return ref.destinationRef
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref archiveReference) PolicyConfigurationIdentity() string {
+ // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
+ return ""
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref archiveReference) PolicyConfigurationNamespaces() []string {
+ // TODO
+ return []string{}
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ src := newImageSource(ctx, ref)
+ return ctrImage.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref archiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, ref), nil
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error {
+ // Not really supported, for safety reasons.
+ return errors.New("Deleting images not implemented for docker-archive: images")
+}
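Because ParseReference splits on the first colon only, the archive path itself must not contain a colon; everything after it is parsed and tag-defaulted as the destination reference. A sketch (the tar path is a placeholder, and the printed form assumes the usual docker.io normalization):

    package main

    import (
        "fmt"
        "log"

        "github.com/containers/image/docker/archive"
        "github.com/containers/image/transports"
    )

    func main() {
        // SplitN(refString, ":", 2): the path is everything before the first colon,
        // the remainder becomes the (tag-defaulted) destination reference.
        ref, err := archive.ParseReference("/tmp/busybox.tar:busybox:latest") // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(transports.ImageName(ref))
        // docker-archive:/tmp/busybox.tar:docker.io/library/busybox:latest
    }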
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
new file mode 100644
index 000000000..559e5c71d
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
@@ -0,0 +1,128 @@
+package daemon
+
+import (
+ "io"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/types"
+ "github.com/docker/docker/client"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+type daemonImageDestination struct {
+ ref daemonReference
+ *tarfile.Destination // Implements most of types.ImageDestination
+ // For talking to imageLoadGoroutine
+ goroutineCancel context.CancelFunc
+ statusChannel <-chan error
+ writer *io.PipeWriter
+ // Other state
+ committed bool // writer has been closed
+}
+
+// newImageDestination returns a types.ImageDestination for the specified image reference.
+func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
+ if ref.ref == nil {
+ return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ }
+ namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
+ if !ok {
+ return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ }
+
+ c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
+ if err != nil {
+ return nil, errors.Wrap(err, "Error initializing docker engine client")
+ }
+
+ reader, writer := io.Pipe()
+ // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
+ statusChannel := make(chan error, 1)
+
+ ctx, goroutineCancel := context.WithCancel(context.Background())
+ go imageLoadGoroutine(ctx, c, reader, statusChannel)
+
+ return &daemonImageDestination{
+ ref: ref,
+ Destination: tarfile.NewDestination(writer, namedTaggedRef),
+ goroutineCancel: goroutineCancel,
+ statusChannel: statusChannel,
+ writer: writer,
+ committed: false,
+ }, nil
+}
+
+// imageLoadGoroutine accepts a tar stream on reader, sends it to c, and reports success or an error by writing to statusChannel
+func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
+ err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
+ defer func() {
+ logrus.Debugf("docker-daemon: sending done, status %v", err)
+ statusChannel <- err
+ }()
+ defer func() {
+ if err == nil {
+ reader.Close()
+ } else {
+ reader.CloseWithError(err)
+ }
+ }()
+
+ resp, err := c.ImageLoad(ctx, reader, true)
+ if err != nil {
+ err = errors.Wrap(err, "Error saving image to docker engine")
+ return
+ }
+ defer resp.Body.Close()
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
+ return true
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *daemonImageDestination) Close() error {
+ if !d.committed {
+ logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
+ // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
+ // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
+ // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
+ // net/http version with native Context support in Go 1.7) do not always actually immediately cancel
+ // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
+ // return early if the context is canceled without terminating the goroutine at all.
+ // So we need this CloseWithError to terminate sending the HTTP request Body
+ // immediately, and hopefully, through terminating the sending (which uses "Transfer-Encoding: chunked") without sending
+ // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+ // Whether that works or not, closing the PipeWriter seems desirable in any case.
+ d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
+ }
+ d.goroutineCancel()
+
+ return nil
+}
+
+func (d *daemonImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit() error {
+ logrus.Debugf("docker-daemon: Closing tar stream")
+ if err := d.Destination.Commit(); err != nil {
+ return err
+ }
+ if err := d.writer.Close(); err != nil {
+ return err
+ }
+ d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
+
+ logrus.Debugf("docker-daemon: Waiting for status")
+ err := <-d.statusChannel
+ return err
+}
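The pipe-plus-buffered-status-channel pattern above, reduced to a standalone sketch; the payload is a placeholder for the tar stream:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
    )

    // consume drains r and reports its result on status; the channel is buffered
    // so this goroutine can terminate even if nobody ever reads the status.
    func consume(r *io.PipeReader, status chan<- error) {
        _, err := ioutil.ReadAll(r)
        if err == nil {
            r.Close()
        } else {
            r.CloseWithError(err)
        }
        status <- err
    }

    func main() {
        reader, writer := io.Pipe()
        status := make(chan error, 1) // buffered, as in daemonImageDestination above
        go consume(reader, status)

        io.WriteString(writer, "tar stream would go here")
        writer.Close()                   // the Commit() analogue
        fmt.Println("status:", <-status) // nil on success
    }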
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
new file mode 100644
index 000000000..3d059da93
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
@@ -0,0 +1,90 @@
+package daemon
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/types"
+ "github.com/docker/docker/client"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
+type daemonImageSource struct {
+ ref daemonReference
+ *tarfile.Source // Implements most of types.ImageSource
+ tarCopyPath string
+}
+
+type layerInfo struct {
+ path string
+ size int64
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
+func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+ c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
+ if err != nil {
+ return nil, errors.Wrap(err, "Error initializing docker engine client")
+ }
+ // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.IsNameOnly() reference.
+ // Either way ImageSave should create a tarball with exactly one image.
+ inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()})
+ if err != nil {
+ return nil, errors.Wrap(err, "Error loading image from docker engine")
+ }
+ defer inputStream.Close()
+
+ // FIXME: use SystemContext here.
+ tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar")
+ if err != nil {
+ return nil, err
+ }
+ defer tarCopyFile.Close()
+
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.Remove(tarCopyFile.Name())
+ }
+ }()
+
+ if _, err := io.Copy(tarCopyFile, inputStream); err != nil {
+ return nil, err
+ }
+
+ succeeded = true
+ return &daemonImageSource{
+ ref: ref,
+ Source: tarfile.NewSource(tarCopyFile.Name()),
+ tarCopyPath: tarCopyFile.Name(),
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *daemonImageSource) Close() error {
+ return os.Remove(s.tarCopyPath)
+}
+
+// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *daemonImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ return nil
+}
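Both newImageSource here and the directory PutBlob earlier use the same succeeded-guard idiom to remove a partially written temp file on any failure path. A standalone sketch:

    package main

    import (
        "io"
        "io/ioutil"
        "os"
        "strings"
    )

    // copyToTemp writes src to a temp file in dir and returns its path.
    // On any error the partially written file is removed via the succeeded guard.
    func copyToTemp(dir string, src io.Reader) (string, error) {
        f, err := ioutil.TempFile(dir, "example-")
        if err != nil {
            return "", err
        }
        defer f.Close()

        succeeded := false
        defer func() {
            if !succeeded {
                os.Remove(f.Name()) // roll back on any failure path
            }
        }()

        if _, err := io.Copy(f, src); err != nil {
            return "", err
        }
        succeeded = true
        return f.Name(), nil
    }

    func main() {
        path, err := copyToTemp("/var/tmp", strings.NewReader("payload"))
        if err != nil {
            panic(err)
        }
        os.Remove(path)
    }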
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go
new file mode 100644
index 000000000..41be1b2db
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go
@@ -0,0 +1,182 @@
+package daemon
+
+import (
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for images managed by a local Docker daemon.
+var Transport = daemonTransport{}
+
+type daemonTransport struct{}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (t daemonTransport) Name() string {
+ return "docker-daemon"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // See the explanation in daemonReference.PolicyConfigurationIdentity.
+ return errors.New(`docker-daemon: does not support any scopes except the default "" one`)
+}
+
+// daemonReference is an ImageReference for images managed by a local Docker daemon
+// Exactly one of id and ref can be set.
+// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
+// For daemonImageDestination, it must be a ref, which is NamedTagged.
+// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
+// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
+type daemonReference struct {
+ id digest.Digest
+ ref reference.Named // !reference.IsNameOnly
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases.
+ // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars).
+
+ // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag).
+ // reference.ParseAnyReference interprets such strings as digests.
+ if dgst, err := digest.Parse(refString); err == nil {
+ // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
+ // Other digest references are ambiguous, so refuse them.
+ if dgst.Algorithm() != digest.Canonical {
+ return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+ }
+ return NewReference(dgst, nil)
+ }
+
+ ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values
+ if err != nil {
+ return nil, err
+ }
+ if reference.FamiliarName(ref) == digest.Canonical.String() {
+ return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+ }
+ return NewReference("", ref)
+}
+
+// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly)
+func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) {
+ if id != "" && ref != nil {
+ return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time")
+ }
+ if ref != nil {
+ if reference.IsNameOnly(ref) {
+ return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ }
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
+ // This MAY be accepted in the future.
+ _, isTagged := ref.(reference.NamedTagged)
+ _, isDigested := ref.(reference.Canonical)
+ if isTagged && isDigested {
+ return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+ }
+ }
+ return daemonReference{
+ id: id,
+ ref: ref,
+ }, nil
+}
+
+func (ref daemonReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref daemonReference) StringWithinTransport() string {
+ switch {
+ case ref.id != "":
+ return ref.id.String()
+ case ref.ref != nil:
+ return reference.FamiliarString(ref.ref)
+ default: // Coverage: Should never happen, NewReference above should refuse such values.
+ panic("Internal inconsistency: daemonReference has empty id and nil ref")
+ }
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref daemonReference) DockerReference() reference.Named {
+ return ref.ref // May be nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref daemonReference) PolicyConfigurationIdentity() string {
+ // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
+ // But the existence of image IDs means that we can’t truly well namespace the input; the untagged images would have to fall into the default policy,
+ // which can be unexpected. So, punt.
+ return "" // This still allows using the default "" scope to define a policy for this transport.
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref daemonReference) PolicyConfigurationNamespaces() []string {
+ // See the explanation in daemonReference.PolicyConfigurationIdentity.
+ return []string{}
+}
+
+// NewImage returns a types.Image for this reference.
+// The caller must call .Close() on the returned Image.
+func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ src, err := newImageSource(ctx, ref)
+ if err != nil {
+ return nil, err
+ }
+ return image.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref daemonReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref daemonReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error {
+ // Should this just untag the image? Should this stop running containers?
+ // The semantics is not quite as clear as for remote repositories.
+ // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
+ return errors.Errorf("Deleting images not implemented for docker-daemon: images")
+}
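A sketch of the reference forms this transport accepts and rejects; the image ID below is a placeholder value:

    package main

    import (
        "fmt"

        "github.com/containers/image/docker/daemon"
    )

    func main() {
        // A name:tag reference, normalized like any Docker name:
        if ref, err := daemon.ParseReference("busybox:latest"); err == nil {
            fmt.Println(ref.StringWithinTransport()) // busybox:latest (familiar form)
        }

        // A full image ID; only digest.Canonical (sha256) with all 64 hex digits is accepted.
        id := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        if ref, err := daemon.ParseReference(id); err == nil {
            fmt.Println(ref.StringWithinTransport()) // the ID itself
        }

        // Rejected: unprefixed 64-hex strings, shortened sha256: prefixes, and
        // name-only references are ambiguous or underspecified.
        if _, err := daemon.ParseReference("busybox"); err != nil {
            fmt.Println("name-only references are rejected:", err)
        }
    }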
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
new file mode 100644
index 000000000..217e9dcbf
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -0,0 +1,430 @@
+package docker
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/pkg/docker/config"
+ "github.com/containers/image/pkg/tlsclientconfig"
+ "github.com/containers/image/types"
+ "github.com/docker/distribution/registry/client"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ dockerHostname = "docker.io"
+ dockerRegistry = "registry-1.docker.io"
+
+ systemPerHostCertDirPath = "/etc/docker/certs.d"
+
+ resolvedPingV2URL = "%s://%s/v2/"
+ resolvedPingV1URL = "%s://%s/v1/_ping"
+ tagsPath = "/v2/%s/tags/list"
+ manifestPath = "/v2/%s/manifests/%s"
+ blobsPath = "/v2/%s/blobs/%s"
+ blobUploadPath = "/v2/%s/blobs/uploads/"
+ extensionsSignaturePath = "/extensions/v2/%s/signatures/%s"
+
+ minimumTokenLifetimeSeconds = 60
+
+ extensionSignatureSchemaVersion = 2 // extensionSignature.Version
+ extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type
+)
+
+var (
+ // ErrV1NotSupported is returned when we're trying to talk to a
+ // docker V1 registry.
+ ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
+ // ErrUnauthorizedForCredentials is returned when the status code returned is 401
+ ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password")
+)
+
+// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
+// signature represents a Docker image signature.
+type extensionSignature struct {
+ Version int `json:"schemaVersion"` // Version specifies the schema version
+ Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format
+ Type string `json:"type"` // Type is optional; if not set, it will be defaulted to "AtomicImageV1"
+ Content []byte `json:"content"` // Content contains the signature
+}
+
+// signatureList represents list of Docker image signatures.
+type extensionSignatureList struct {
+ Signatures []extensionSignature `json:"signatures"`
+}
+
+type bearerToken struct {
+ Token string `json:"token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+}
+
+// dockerClient is configuration for dealing with a single Docker registry.
+type dockerClient struct {
+ // The following members are set by newDockerClient and do not change afterwards.
+ ctx *types.SystemContext
+ registry string
+ username string
+ password string
+ client *http.Client
+ signatureBase signatureStorageBase
+ scope authScope
+ // The following members are detected registry properties:
+ // They are set after a successful detectProperties(), and never change afterwards.
+ scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
+ challenges []challenge
+ supportsSignatures bool
+ // The following members are private state for setupRequestAuth, both are valid if token != nil.
+ token *bearerToken
+ tokenExpiration time.Time
+}
+
+type authScope struct {
+ remoteName string
+ actions string
+}
+
+// this is cloned from docker/go-connections because upstream docker has changed
+// it and make deps here fails otherwise.
+// We'll drop this once we upgrade to docker 1.13.x deps.
+func serverDefault() *tls.Config {
+ return &tls.Config{
+ // Avoid fallback to SSL protocols < TLS1.0
+ MinVersion: tls.VersionTLS10,
+ PreferServerCipherSuites: true,
+ CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+ }
+}
+
+// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
+func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
+ if ctx != nil && ctx.DockerCertPath != "" {
+ return ctx.DockerCertPath
+ }
+ var hostCertDir string
+ if ctx != nil && ctx.DockerPerHostCertDirPath != "" {
+ hostCertDir = ctx.DockerPerHostCertDirPath
+ } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+ hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
+ } else {
+ hostCertDir = systemPerHostCertDirPath
+ }
+ return filepath.Join(hostCertDir, hostPort)
+}
+
+// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
+// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+ registry := reference.Domain(ref.ref)
+ username, password, err := config.GetAuthentication(ctx, reference.Domain(ref.ref))
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting username and password")
+ }
+ sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
+ if err != nil {
+ return nil, err
+ }
+ remoteName := reference.Path(ref.ref)
+
+ return newDockerClientWithDetails(ctx, registry, username, password, actions, sigBase, remoteName)
+}
+
+// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
+func newDockerClientWithDetails(ctx *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
+ hostName := registry
+ if registry == dockerHostname {
+ registry = dockerRegistry
+ }
+ tr := tlsclientconfig.NewTransport()
+ tr.TLSClientConfig = serverDefault()
+
+ // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
+ // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
+ // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
+ // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
+ // undocumented and may change if docker/docker changes.
+ certDir := dockerCertDir(ctx, hostName)
+ if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
+ return nil, err
+ }
+
+ if ctx != nil && ctx.DockerInsecureSkipTLSVerify {
+ tr.TLSClientConfig.InsecureSkipVerify = true
+ }
+
+ return &dockerClient{
+ ctx: ctx,
+ registry: registry,
+ username: username,
+ password: password,
+ client: &http.Client{Transport: tr},
+ signatureBase: sigBase,
+ scope: authScope{
+ actions: actions,
+ remoteName: remoteName,
+ },
+ }, nil
+}
+
+// CheckAuth validates the credentials by attempting to log in to the registry;
+// it returns an error if the HTTP request failed or if the status code received was 401.
+func CheckAuth(ctx context.Context, sCtx *types.SystemContext, username, password, registry string) error {
+ newLoginClient, err := newDockerClientWithDetails(sCtx, registry, username, password, "", nil, "")
+ if err != nil {
+ return errors.Wrapf(err, "error creating new docker client")
+ }
+
+ resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return nil
+ case http.StatusUnauthorized:
+ return ErrUnauthorizedForCredentials
+ default:
+ return errors.Errorf("error occured with status code %q", resp.StatusCode)
+ }
+}
+
+// makeRequest creates and executes an http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
+ if err := c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+
+ url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+ return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
+}
+
+// makeRequestToResolvedURL creates and executes an http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// TODO(runcom): too many arguments here, use a struct
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
+ req, err := http.NewRequest(method, url, stream)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+ req.ContentLength = streamLen
+ }
+ req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
+ for n, h := range headers {
+ for _, hh := range h {
+ req.Header.Add(n, hh)
+ }
+ }
+ if c.ctx != nil && c.ctx.DockerRegistryUserAgent != "" {
+ req.Header.Add("User-Agent", c.ctx.DockerRegistryUserAgent)
+ }
+ if sendAuth {
+ if err := c.setupRequestAuth(req); err != nil {
+ return nil, err
+ }
+ }
+ logrus.Debugf("%s %s", method, url)
+ res, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// we're using the challenges from the /v2/ ping response and not the ones from the destination
+// URL in this request because:
+//
+// 1) docker does that as well
+// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
+//
+// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
+func (c *dockerClient) setupRequestAuth(req *http.Request) error {
+ if len(c.challenges) == 0 {
+ return nil
+ }
+ schemeNames := make([]string, 0, len(c.challenges))
+ for _, challenge := range c.challenges {
+ schemeNames = append(schemeNames, challenge.Scheme)
+ switch challenge.Scheme {
+ case "basic":
+ req.SetBasicAuth(c.username, c.password)
+ return nil
+ case "bearer":
+ if c.token == nil || time.Now().After(c.tokenExpiration) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return errors.Errorf("missing realm in bearer auth challenge")
+ }
+ service := challenge.Parameters["service"] // Will be "" if not present
+ var scope string
+ if c.scope.remoteName != "" && c.scope.actions != "" {
+ scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
+ }
+ token, err := c.getBearerToken(req.Context(), realm, service, scope)
+ if err != nil {
+ return err
+ }
+ c.token = token
+ c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ }
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
+ return nil
+ default:
+ logrus.Debugf("no handler for %s authentication", challenge.Scheme)
+ }
+ }
+ logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
+ return nil
+}
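+
+// For orientation, a typical bearer challenge consumed above, as sent in the
+// /v2/ ping response, looks like this (values are illustrative):
+//
+//	WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"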
+
+func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
+ authReq, err := http.NewRequest("GET", realm, nil)
+ if err != nil {
+ return nil, err
+ }
+ authReq = authReq.WithContext(ctx)
+ getParams := authReq.URL.Query()
+ if service != "" {
+ getParams.Add("service", service)
+ }
+ if scope != "" {
+ getParams.Add("scope", scope)
+ }
+ authReq.URL.RawQuery = getParams.Encode()
+ if c.username != "" && c.password != "" {
+ authReq.SetBasicAuth(c.username, c.password)
+ }
+ tr := tlsclientconfig.NewTransport()
+ // TODO(runcom): insecure for now to contact the external token service
+ tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+ client := &http.Client{Transport: tr}
+ res, err := client.Do(authReq)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusUnauthorized:
+ return nil, ErrUnauthorizedForCredentials
+ case http.StatusOK:
+ break
+ default:
+ return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
+ }
+ tokenBlob, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ var token bearerToken
+ if err := json.Unmarshal(tokenBlob, &token); err != nil {
+ return nil, err
+ }
+ if token.ExpiresIn < minimumTokenLifetimeSeconds {
+ token.ExpiresIn = minimumTokenLifetimeSeconds
+ logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+ }
+ if token.IssuedAt.IsZero() {
+ token.IssuedAt = time.Now().UTC()
+ }
+ return &token, nil
+}
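+
+// The resulting token request is a plain GET against the challenge realm; with
+// the hypothetical values from above it would look like
+//
+//	GET https://auth.example.com/token?scope=repository%3Afoo%2Fbar%3Apull&service=registry.example.com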
+
+// detectProperties detects various properties of the registry.
+// See the dockerClient documentation for members which are affected by this.
+func (c *dockerClient) detectProperties(ctx context.Context) error {
+ if c.scheme != "" {
+ return nil
+ }
+
+ ping := func(scheme string) error {
+ url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
+ resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
+ logrus.Debugf("Ping %s err %#v", url, err)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+ return errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
+ }
+ c.challenges = parseAuthHeader(resp.Header)
+ c.scheme = scheme
+ c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
+ return nil
+ }
+ err := ping("https")
+ if err != nil && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify {
+ err = ping("http")
+ }
+ if err != nil {
+ err = errors.Wrap(err, "pinging docker registry returned")
+ if c.ctx != nil && c.ctx.DockerDisableV1Ping {
+ return err
+ }
+ // best effort to understand if we're talking to a V1 registry
+ pingV1 := func(scheme string) bool {
+ url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
+ resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
+ logrus.Debugf("Ping %s err %#v", url, err)
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+ logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+ return false
+ }
+ return true
+ }
+ isV1 := pingV1("https")
+ if !isV1 && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify {
+ isV1 = pingV1("http")
+ }
+ if isV1 {
+ err = ErrV1NotSupported
+ }
+ }
+ return err
+}
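+
+// Assuming resolvedPingV2URL expands to "%s://%s/v2/" (it is defined elsewhere in
+// this package), detectProperties first pings https://<registry>/v2/ and falls
+// back to http:// only when DockerInsecureSkipTLSVerify is set; the V1 ping
+// exists purely to report ErrV1NotSupported with a clearer message.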
+
+// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
+// using the original data structures.
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
+ res, err := c.makeRequest(ctx, "GET", path, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, client.HandleErrorResponse(res)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var parsedBody extensionSignatureList
+ if err := json.Unmarshal(body, &parsedBody); err != nil {
+ return nil, errors.Wrapf(err, "Error decoding signature list")
+ }
+ return &parsedBody, nil
+}
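+
+// Assuming extensionsSignaturePath expands to "/extensions/v2/%s/signatures/%s"
+// (it is defined elsewhere in this package), the request above is e.g.
+// GET /extensions/v2/foo/bar/signatures/sha256:<hex> (hypothetical repository).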
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
new file mode 100644
index 000000000..8be35b735
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image.go
@@ -0,0 +1,63 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// Image is a Docker-specific implementation of types.Image with a few extra methods
+// which are specific to Docker.
+type Image struct {
+ types.Image
+ src *dockerImageSource
+}
+
+// newImage returns a new Image interface type after setting up
+// a client to the registry hosting the given image.
+// The caller must call .Close() on the returned Image.
+func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) {
+ s, err := newImageSource(ctx, ref)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.FromSource(s)
+ if err != nil {
+ return nil, err
+ }
+ return &Image{Image: img, src: s}, nil
+}
+
+// SourceRefFullName returns a fully expanded name for the repository this image is in.
+func (i *Image) SourceRefFullName() string {
+ return i.src.ref.ref.Name()
+}
+
+// GetRepositoryTags lists all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
+func (i *Image) GetRepositoryTags() ([]string, error) {
+ path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref))
+ // FIXME: Pass the context.Context
+ res, err := i.src.c.makeRequest(context.TODO(), "GET", path, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ // print url also
+ return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
+ }
+ type tagsRes struct {
+ Tags []string
+ }
+ tags := &tagsRes{}
+ if err := json.NewDecoder(res.Body).Decode(tags); err != nil {
+ return nil, err
+ }
+ return tags.Tags, nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
new file mode 100644
index 000000000..32d5a18b1
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -0,0 +1,457 @@
+package docker
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/client"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type dockerImageDestination struct {
+ ref dockerReference
+ c *dockerClient
+ // State
+ manifestDigest digest.Digest // or "" if not yet known.
+}
+
+// newImageDestination creates a new ImageDestination for the specified image reference.
+func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
+ c, err := newDockerClientFromRef(ctx, ref, true, "pull,push")
+ if err != nil {
+ return nil, err
+ }
+ return &dockerImageDestination{
+ ref: ref,
+ c: c,
+ }, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dockerImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dockerImageDestination) Close() error {
+ return nil
+}
+
+func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
+ return []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dockerImageDestination) SupportsSignatures() error {
+ if err := d.c.detectProperties(context.TODO()); err != nil {
+ return err
+ }
+ switch {
+ case d.c.signatureBase != nil:
+ return nil
+ case d.c.supportsSignatures:
+ return nil
+ default:
+ return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+ }
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dockerImageDestination) ShouldCompressLayers() bool {
+ return true
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually
+// be uploaded to the image destination, true otherwise.
+func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// sizeCounter is an io.Writer which only counts the total size of its input.
+type sizeCounter struct{ size int64 }
+
+func (c *sizeCounter) Write(p []byte) (n int, err error) {
+ c.size += int64(len(p))
+ return len(p), nil
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ if inputInfo.Digest.String() != "" {
+ haveBlob, size, err := d.HasBlob(inputInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ if haveBlob {
+ return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ }
+ }
+
+ // FIXME? Chunked upload, progress reporting, etc.
+ uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
+ logrus.Debugf("Uploading %s", uploadPath)
+ res, err := d.c.makeRequest(context.TODO(), "POST", uploadPath, nil, nil)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusAccepted {
+ logrus.Debugf("Error initiating layer upload, response %#v", *res)
+ return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadPath, res.StatusCode)
+ }
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
+ }
+
+ digester := digest.Canonical.Digester()
+ sizeCounter := &sizeCounter{}
+ tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
+ res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
+ if err != nil {
+ logrus.Debugf("Error uploading layer chunked, response %#v", res)
+ return types.BlobInfo{}, err
+ }
+ defer res.Body.Close()
+ computedDigest := digester.Digest()
+
+ uploadLocation, err = res.Location()
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
+ }
+
+ // FIXME: DELETE uploadLocation on failure
+
+ locationQuery := uploadLocation.Query()
+ // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
+ locationQuery.Set("digest", computedDigest.String())
+ uploadLocation.RawQuery = locationQuery.Encode()
+ res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ logrus.Debugf("Error uploading layer, response %#v", *res)
+ return types.BlobInfo{}, errors.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
+ }
+
+ logrus.Debugf("Upload of layer %s complete", computedDigest)
+ return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ if info.Digest == "" {
+ return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ }
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
+
+ logrus.Debugf("Checking %s", checkPath)
+ res, err := d.c.makeRequest(context.TODO(), "HEAD", checkPath, nil, nil)
+ if err != nil {
+ return false, -1, err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusOK:
+ logrus.Debugf("... already exists")
+ return true, getBlobSize(res), nil
+ case http.StatusUnauthorized:
+ logrus.Debugf("... not authorized")
+ return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
+ case http.StatusNotFound:
+ logrus.Debugf("... not present")
+ return false, -1, nil
+ default:
+ return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
+ }
+}
+
+func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return info, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dockerImageDestination) PutManifest(m []byte) error {
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.manifestDigest = digest
+
+ refTail, err := d.ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+ path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+
+ headers := map[string][]string{}
+ mimeType := manifest.GuessMIMEType(m)
+ if mimeType != "" {
+ headers["Content-Type"] = []string{mimeType}
+ }
+ res, err := d.c.makeRequest(context.TODO(), "PUT", path, headers, bytes.NewReader(m))
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path)
+ if isManifestInvalidError(errors.Cause(err)) {
+ err = types.ManifestTypeRejectedError{Err: err}
+ }
+ return err
+ }
+ return nil
+}
+
+// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+ errors, ok := err.(errcode.Errors)
+ if !ok || len(errors) == 0 {
+ return false
+ }
+ ec, ok := errors[0].(errcode.ErrorCoder)
+ if !ok {
+ return false
+ }
+ // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+ // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
+ // when uploading to a tag (because it can’t find a matching tag inside the manifest)
+ return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
+}
+
+func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
+ // Do not fail if we don’t really need to support signatures.
+ if len(signatures) == 0 {
+ return nil
+ }
+ if err := d.c.detectProperties(context.TODO()); err != nil {
+ return err
+ }
+ switch {
+ case d.c.signatureBase != nil:
+ return d.putSignaturesToLookaside(signatures)
+ case d.c.supportsSignatures:
+ return d.putSignaturesToAPIExtension(signatures)
+ default:
+ return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+ }
+}
+
+// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in d.c.signatureBase,
+// which is not nil.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
+ // FIXME? This overwrites files one at a time, definitely not atomic.
+ // A failure when updating signatures with a reordered copy could lose some of them.
+
+ // Skip dealing with the manifest digest if not necessary.
+ if len(signatures) == 0 {
+ return nil
+ }
+
+ if d.manifestDigest.String() == "" {
+ // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+ return errors.Errorf("Unknown manifest digest, can't add signatures")
+ }
+
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ for i, signature := range signatures {
+ url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+ if url == nil {
+ return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ err := d.putOneSignature(url, signature)
+ if err != nil {
+ return err
+ }
+ }
+ // Remove any other signatures, if present.
+ // We stop at the first missing signature; if a previous deleting loop aborted
+ // prematurely, this may not clean up all of them, but one missing signature
+ // is enough for dockerImageSource to stop looking for other signatures, so that
+ // is sufficient.
+ for i := len(signatures); ; i++ {
+ url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+ if url == nil {
+ return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ missing, err := d.c.deleteOneSignature(url)
+ if err != nil {
+ return err
+ }
+ if missing {
+ break
+ }
+ }
+
+ return nil
+}
+
+// putOneSignature stores one signature to url.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
+ switch url.Scheme {
+ case "file":
+ logrus.Debugf("Writing to %s", url.Path)
+ err := os.MkdirAll(filepath.Dir(url.Path), 0755)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(url.Path, signature, 0644)
+ if err != nil {
+ return err
+ }
+ return nil
+
+ case "http", "https":
+ return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ default:
+ return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
+ }
+}
+
+// deleteOneSignature deletes a signature from url, if it exists.
+// If it successfully determines that the signature does not exist, returns (true, nil)
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
+ switch url.Scheme {
+ case "file":
+ logrus.Debugf("Deleting %s", url.Path)
+ err := os.Remove(url.Path)
+ if err != nil && os.IsNotExist(err) {
+ return true, nil
+ }
+ return false, err
+
+ case "http", "https":
+ return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ default:
+ return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+ }
+}
+
+// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte) error {
+ // Skip dealing with the manifest digest, or reading the old state, if not necessary.
+ if len(signatures) == 0 {
+ return nil
+ }
+
+ if d.manifestDigest.String() == "" {
+ // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+ return errors.Errorf("Unknown manifest digest, can't add signatures")
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures,
+ // but the X-Registry-Supports-Signatures API extension does not support that yet.
+
+ existingSignatures, err := d.c.getExtensionsSignatures(context.TODO(), d.ref, d.manifestDigest)
+ if err != nil {
+ return err
+ }
+ existingSigNames := map[string]struct{}{}
+ for _, sig := range existingSignatures.Signatures {
+ existingSigNames[sig.Name] = struct{}{}
+ }
+
+sigExists:
+ for _, newSig := range signatures {
+ for _, existingSig := range existingSignatures.Signatures {
+ if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+ continue sigExists
+ }
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil {
+ return errors.Wrap(err, "Error generating random signature name")
+ }
+ if n != 16 { // crypto/rand.Read returns len(randBytes) on success; be defensive anyway
+ return errors.Errorf("Error generating random signature name: read %d bytes instead of 16", n)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
+ if _, ok := existingSigNames[signatureName]; !ok {
+ break
+ }
+ }
+ sig := extensionSignature{
+ Version: extensionSignatureSchemaVersion,
+ Name: signatureName,
+ Type: extensionSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+ res, err := d.c.makeRequest(context.TODO(), "PUT", path, nil, bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ body, err := ioutil.ReadAll(res.Body)
+ if err == nil {
+ logrus.Debugf("Error body %s", string(body))
+ }
+ logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
+ return errors.Errorf("Error uploading signature to %s, status %d", path, res.StatusCode)
+ }
+ }
+
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) Commit() error {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
new file mode 100644
index 000000000..14e3c2b50
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -0,0 +1,379 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/docker/distribution/registry/client"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type dockerImageSource struct {
+ ref dockerReference
+ c *dockerClient
+ // State
+ cachedManifest []byte // nil if not loaded yet
+ cachedManifestMIMEType string // Only valid if cachedManifest != nil
+}
+
+// newImageSource creates a new ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+ c, err := newDockerClientFromRef(ctx, ref, false, "pull")
+ if err != nil {
+ return nil, err
+ }
+ return &dockerImageSource{
+ ref: ref,
+ c: c,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+ return nil
+}
+
+// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *dockerImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ return nil
+}
+
+// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1).
+// An empty string is returned unchanged; invalid values are "simplified" to an empty string.
+func simplifyContentType(contentType string) string {
+ if contentType == "" {
+ return contentType
+ }
+ mimeType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return ""
+ }
+ return mimeType
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
+ err := s.ensureManifestIsLoaded(context.TODO())
+ if err != nil {
+ return nil, "", err
+ }
+ return s.cachedManifest, s.cachedManifestMIMEType, nil
+}
+
+func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
+ path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
+ headers := make(map[string][]string)
+ headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
+ res, err := s.c.makeRequest(ctx, "GET", path, headers, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, "", client.HandleErrorResponse(res)
+ }
+ manblob, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, "", err
+ }
+ return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+}
+
+// GetTargetManifest returns an image's manifest given a digest.
+// This is mainly used to retrieve a single image's manifest out of a manifest list.
+func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ return s.fetchManifest(context.TODO(), digest.String())
+}
+
+// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
+//
+// ImageSource implementations are not required or expected to do any caching,
+// but because our signatures are “attached” to the manifest digest,
+// we need to ensure that the digest of the manifest returned by GetManifest
+// and used by GetSignatures are consistent, otherwise we would get spurious
+// signature verification failures when pulling while a tag is being updated.
+func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
+ if s.cachedManifest != nil {
+ return nil
+ }
+
+ reference, err := s.ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+
+ manblob, mt, err := s.fetchManifest(ctx, reference)
+ if err != nil {
+ return err
+ }
+ // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
+ s.cachedManifest = manblob
+ s.cachedManifestMIMEType = mt
+ return nil
+}
+
+func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) {
+ var (
+ resp *http.Response
+ err error
+ )
+ for _, url := range urls {
+ resp, err = s.c.makeRequestToResolvedURL(context.TODO(), "GET", url, nil, nil, -1, false)
+ if err == nil {
+ if resp.StatusCode != http.StatusOK {
+ err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
+ logrus.Debug(err)
+ continue
+ }
+ break
+ }
+ }
+ if err == nil && resp.Body != nil { // check err first; resp is nil when the last attempt failed
+ return resp.Body, getBlobSize(resp), nil
+ }
+ return nil, 0, err
+}
+
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ return s.getExternalBlob(info.URLs)
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := s.c.makeRequest(context.TODO(), "GET", path, nil, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if res.StatusCode != http.StatusOK {
+ // print url also
+ return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
+ }
+ return res.Body, getBlobSize(res), nil
+}
+
+func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+ if err := s.c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+ switch {
+ case s.c.signatureBase != nil:
+ return s.getSignaturesFromLookaside(ctx)
+ case s.c.supportsSignatures:
+ return s.getSignaturesFromAPIExtension(ctx)
+ default:
+ return [][]byte{}, nil
+ }
+}
+
+// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest.
+func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) {
+ if digested, ok := s.ref.ref.(reference.Digested); ok {
+ d := digested.Digest()
+ if d.Algorithm() == digest.Canonical {
+ return d, nil
+ }
+ }
+ if err := s.ensureManifestIsLoaded(ctx); err != nil {
+ return "", err
+ }
+ return manifest.Digest(s.cachedManifest)
+}
+
+// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
+// which is not nil.
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) {
+ manifestDigest, err := s.manifestDigest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ signatures := [][]byte{}
+ for i := 0; ; i++ {
+ url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
+ if url == nil {
+ return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ signature, missing, err := s.getOneSignature(ctx, url)
+ if err != nil {
+ return nil, err
+ }
+ if missing {
+ break
+ }
+ signatures = append(signatures, signature)
+ }
+ return signatures, nil
+}
+
+// getOneSignature downloads one signature from url.
+// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
+ switch url.Scheme {
+ case "file":
+ logrus.Debugf("Reading %s", url.Path)
+ sig, err := ioutil.ReadFile(url.Path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, true, nil
+ }
+ return nil, false, err
+ }
+ return sig, false, nil
+
+ case "http", "https":
+ logrus.Debugf("GET %s", url)
+ req, err := http.NewRequest("GET", url.String(), nil)
+ if err != nil {
+ return nil, false, err
+ }
+ req = req.WithContext(ctx)
+ res, err := s.c.client.Do(req)
+ if err != nil {
+ return nil, false, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return nil, true, nil
+ } else if res.StatusCode != http.StatusOK {
+ return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
+ }
+ sig, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, false, err
+ }
+ return sig, false, nil
+
+ default:
+ return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
+ }
+}
+
+// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) {
+ manifestDigest, err := s.manifestDigest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ var sigs [][]byte
+ for _, sig := range parsedBody.Signatures {
+ if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
+ sigs = append(sigs, sig.Content)
+ }
+ }
+ return sigs, nil
+}
+
+// deleteImage deletes the named image from the registry, if supported.
+func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
+ c, err := newDockerClientFromRef(ctx, ref, true, "push")
+ if err != nil {
+ return err
+ }
+
+ // When retrieving the digest from a registry >= 2.3 use the following header:
+ // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+ headers := make(map[string][]string)
+ headers["Accept"] = []string{manifest.DockerV2Schema2MediaType}
+
+ refTail, err := ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+ getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
+ get, err := c.makeRequest(context.TODO(), "GET", getPath, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer get.Body.Close()
+ manifestBody, err := ioutil.ReadAll(get.Body)
+ if err != nil {
+ return err
+ }
+ switch get.StatusCode {
+ case http.StatusOK:
+ case http.StatusNotFound:
+ return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+ default:
+ return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+ }
+
+ digest := get.Header.Get("Docker-Content-Digest")
+ deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+
+ // When retrieving the digest from a registry >= 2.3 use the following header:
+ // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+ delete, err := c.makeRequest(context.TODO(), "DELETE", deletePath, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer delete.Body.Close()
+
+ body, err := ioutil.ReadAll(delete.Body)
+ if err != nil {
+ return err
+ }
+ if delete.StatusCode != http.StatusAccepted {
+ return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
+ }
+
+ if c.signatureBase != nil {
+ manifestDigest, err := manifest.Digest(manifestBody)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; ; i++ {
+ url := signatureStorageURL(c.signatureBase, manifestDigest, i)
+ if url == nil {
+ return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ missing, err := c.deleteOneSignature(url)
+ if err != nil {
+ return err
+ }
+ if missing {
+ break
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go
new file mode 100644
index 000000000..1d67cc4fc
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_transport.go
@@ -0,0 +1,160 @@
+package docker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/docker/policyconfiguration"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for Docker registry-hosted images.
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+ return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+ ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if !strings.HasPrefix(refString, "//") {
+ return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
+ }
+ ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
+ if err != nil {
+ return nil, err
+ }
+ ref = reference.TagNameOnly(ref)
+ return NewReference(ref)
+}
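+
+// For example (illustrative): ParseReference("//busybox") normalizes the name to
+// docker.io/library/busybox, and TagNameOnly then defaults the missing tag to
+// :latest before NewReference is called.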
+
+// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
+func NewReference(ref reference.Named) (types.ImageReference, error) {
+ if reference.IsNameOnly(ref) {
+ return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ }
+ // A github.com/docker/distribution/reference value can have a tag and a digest at the same time!
+ // The docker/distribution API does not really support that (we can’t ask for an image with a specific
+ // tag and digest), so fail. This MAY be accepted in the future.
+ // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
+ // the tag or the digest first?)
+ _, isTagged := ref.(reference.NamedTagged)
+ _, isDigested := ref.(reference.Canonical)
+ if isTagged && isDigested {
+ return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+ }
+ return dockerReference{
+ ref: ref,
+ }, nil
+}
+
+func (ref dockerReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref dockerReference) StringWithinTransport() string {
+ return "//" + reference.FamiliarString(ref.ref)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref dockerReference) DockerReference() reference.Named {
+ return ref.ref
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref dockerReference) PolicyConfigurationIdentity() string {
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+ if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+ }
+ return res
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref dockerReference) PolicyConfigurationNamespaces() []string {
+ return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ return newImage(ctx, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dockerReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dockerReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dockerReference) DeleteImage(ctx *types.SystemContext) error {
+ return deleteImage(ctx, ref)
+}
+
+// tagOrDigest returns a tag or digest from the reference.
+func (ref dockerReference) tagOrDigest() (string, error) {
+ if ref, ok := ref.ref.(reference.Canonical); ok {
+ return ref.Digest().String(), nil
+ }
+ if ref, ok := ref.ref.(reference.NamedTagged); ok {
+ return ref.Tag(), nil
+ }
+ // This should not happen, NewReference above refuses reference.IsNameOnly values.
+ return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+}
diff --git a/vendor/github.com/containers/image/docker/lookaside.go b/vendor/github.com/containers/image/docker/lookaside.go
new file mode 100644
index 000000000..18e7733b9
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/lookaside.go
@@ -0,0 +1,202 @@
+package docker
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/ghodss/yaml"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path'
+var systemRegistriesDirPath = builtinRegistriesDirPath
+
+// builtinRegistriesDirPath is the path to registries.d.
+// DO NOT change this, instead see systemRegistriesDirPath above.
+const builtinRegistriesDirPath = "/etc/containers/registries.d"
+
+// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type registryConfiguration struct {
+ DefaultDocker *registryNamespace `json:"default-docker"`
+ // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
+ Docker map[string]registryNamespace `json:"docker"`
+}
+
+// registryNamespace defines lookaside locations for a single namespace.
+type registryNamespace struct {
+ SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
+ SigStoreStaging string `json:"sigstore-staging"` // For writing only.
+}
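+
+// Illustrative only: a registries.d file that decodes into these structures
+// (hosts and paths are hypothetical):
+//
+//	default-docker:
+//	  sigstore: https://sigstore.example.com
+//	docker:
+//	  registry.example.com/project:
+//	    sigstore-staging: file:///var/lib/sigstore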
+
+// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below.
+type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported.
+
+// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
+func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) {
+ // FIXME? Loading and parsing the config could be cached across calls.
+ dirPath := registriesDirPath(ctx)
+ logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
+ config, err := loadAndMergeConfig(dirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ topLevel := config.signatureTopLevel(ref, write)
+ if topLevel == "" {
+ return nil, nil
+ }
+
+ url, err := url.Parse(topLevel)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
+ }
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ // FIXME? Restrict to explicitly supported schemes?
+ repo := reference.Path(ref.ref) // Note that this is without a tag or digest.
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+ return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
+ }
+ url.Path = url.Path + "/" + repo
+ return url, nil
+}
+
+// registriesDirPath returns a path to registries.d
+func registriesDirPath(ctx *types.SystemContext) string {
+ if ctx != nil {
+ if ctx.RegistriesDirPath != "" {
+ return ctx.RegistriesDirPath
+ }
+ if ctx.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
+ }
+ }
+ return systemRegistriesDirPath
+}
+
+// loadAndMergeConfig loads configuration files in dirPath
+func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
+ mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
+ dockerDefaultMergedFrom := ""
+ nsMergedFrom := map[string]string{}
+
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &mergedConfig, nil
+ }
+ return nil, err
+ }
+ configNames, err := dir.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+ for _, configName := range configNames {
+ if !strings.HasSuffix(configName, ".yaml") {
+ continue
+ }
+ configPath := filepath.Join(dirPath, configName)
+ configBytes, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return nil, err
+ }
+
+ var config registryConfiguration
+ err = yaml.Unmarshal(configBytes, &config)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing %s", configPath)
+ }
+
+ if config.DefaultDocker != nil {
+ if mergedConfig.DefaultDocker != nil {
+ return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+ dockerDefaultMergedFrom, configPath)
+ }
+ mergedConfig.DefaultDocker = config.DefaultDocker
+ dockerDefaultMergedFrom = configPath
+ }
+
+ for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
+ if _, ok := mergedConfig.Docker[nsName]; ok {
+ return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+ nsName, nsMergedFrom[nsName], configPath)
+ }
+ mergedConfig.Docker[nsName] = nsConfig
+ nsMergedFrom[nsName] = configPath
+ }
+ }
+
+ return &mergedConfig, nil
+}
+
+// config.signatureTopLevel returns a URL string configured in config for ref, for write access if “write”
+// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used.
+func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Using "docker" namespace %s`, identity)
+ if url := ns.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Using "docker" namespace %s`, name)
+ if url := ns.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Using "default-docker" configuration`)
+ if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+ logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
+ return ""
+}
+
+// ns.signatureTopLevel returns a URL string configured in ns, using the write-access location if “write”,
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+ if write && ns.SigStoreStaging != "" {
+ logrus.Debugf(` Using %s`, ns.SigStoreStaging)
+ return ns.SigStoreStaging
+ }
+ if ns.SigStore != "" {
+ logrus.Debugf(` Using %s`, ns.SigStore)
+ return ns.SigStore
+ }
+ return ""
+}
+
+// signatureStorageURL returns a URL usable for accessing the signature index in base with the known manifestDigest, or nil if not applicable.
+// Returns nil iff base == nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+ if base == nil {
+ return nil
+ }
+ url := *base
+ url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+ return &url
+}
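+
+// As a worked example (host name and digest illustrative only): with base
+// https://sigstore.example.com/sigs/library/busybox, a manifestDigest of
+// sha256:0000…, and index 0, signatureStorageURL returns
+// https://sigstore.example.com/sigs/library/busybox@sha256=0000…/signature-1.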
diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go
new file mode 100644
index 000000000..31bbb544c
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go
@@ -0,0 +1,56 @@
+package policyconfiguration
+
+import (
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/pkg/errors"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+ res := ref.Name()
+ tagged, isTagged := ref.(reference.NamedTagged)
+ digested, isDigested := ref.(reference.Canonical)
+ switch {
+ case isTagged && isDigested: // Note that this CAN actually happen.
+ return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+ case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+ return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+ case isTagged:
+ res = res + ":" + tagged.Tag()
+ case isDigested:
+ res = res + "@" + digested.Digest().String()
+ default: // Coverage: The above was supposed to be exhaustive.
+ return "", errors.New("Internal inconsistency, unexpected default branch")
+ }
+ return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationNamespaces.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceNamespaces(ref reference.Named) []string {
+ // Look for a match of the repository, and then of the possible parent
+ // namespaces. Note that this only happens on the expanded host names
+ // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox",
+	// then in its parent "docker.io/library"; never in "busybox",
+	// in the un-namespaced "library", or in "" supposedly implicitly representing "library/".
+ //
+ // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
+ // iteration matches the host name (for any namespace).
+ res := []string{}
+ name := ref.Name()
+ for {
+ res = append(res, name)
+
+ lastSlash := strings.LastIndex(name, "/")
+ if lastSlash == -1 {
+ break
+ }
+ name = name[:lastSlash]
+ }
+ return res
+}
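+
+// As a concrete illustration: for a reference parsed from "busybox:latest",
+// DockerReferenceIdentity returns "docker.io/library/busybox:latest", and
+// DockerReferenceNamespaces returns
+// {"docker.io/library/busybox", "docker.io/library", "docker.io"}.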
diff --git a/vendor/github.com/containers/image/docker/reference/README.md b/vendor/github.com/containers/image/docker/reference/README.md
new file mode 100644
index 000000000..53a88de82
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/README.md
@@ -0,0 +1,2 @@
+This is a copy of github.com/docker/distribution/reference as of commit fb0bebc4b64e3881cc52a2478d749845ed76d2a8,
+except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file
diff --git a/vendor/github.com/containers/image/docker/reference/helpers.go b/vendor/github.com/containers/image/docker/reference/helpers.go
new file mode 100644
index 000000000..978df7eab
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
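+
+// For example, FamiliarMatch("library/*", ref) succeeds for a reference parsed
+// from "docker.io/library/ubuntu:16.04": the familiar string "ubuntu:16.04" does
+// not match the pattern, so the familiar name "library/ubuntu" is tried and does.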
diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go
new file mode 100644
index 000000000..fcc436a39
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/normalize.go
@@ -0,0 +1,152 @@
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ legacyDefaultDomain = "index.docker.io"
+ defaultDomain = "docker.io"
+ officialRepoName = "library"
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
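+
+// For example: splitDockerDomain("busybox") returns ("docker.io", "library/busybox"),
+// splitDockerDomain("index.docker.io/busybox") also returns ("docker.io", "library/busybox"),
+// and splitDockerDomain("quay.io/foo/bar") returns ("quay.io", "foo/bar").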
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized name-only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/<official repo name>"
+ if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+ repo.path = split[1]
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // Default tag must be valid, to create a NamedTagged
+ // type with non-validated input the WithTag function
+ // should be used instead
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
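+
+// For example (results shown as String() values):
+//
+//	ParseAnyReference("ubuntu")           -> "docker.io/library/ubuntu"
+//	ParseAnyReference("<64 hex digits>")  -> "sha256:<64 hex digits>"
+//	ParseAnyReference("sha256:<64 hex>")  -> the digest itself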
diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go
new file mode 100644
index 000000000..fd3510e9e
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] path-component ['/' path-component]*
+// domain := domain-component ['.' domain-component]* [':' port-number]
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+// short-identifier := /[a-f0-9]{6,64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// in which it can be referenced by
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the Named reference
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the Named reference
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+// DEPRECATED: Use Domain or Path
+func SplitHostname(named Named) (string, string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain(), r.Path()
+ }
+ return splitDomain(named.Name())
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+ return nil, ErrNameContainsUppercase
+ }
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ var repo repository
+
+ nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+ if nameMatch != nil && len(nameMatch) == 3 {
+ repo.domain = nameMatch[1]
+ repo.path = nameMatch[2]
+ } else {
+ repo.domain = ""
+ repo.path = matches[1]
+ }
+
+ ref := reference{
+ namedRepository: repo,
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.Parse(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+ named, err := ParseNormalizedNamed(s)
+ if err != nil {
+ return nil, err
+ }
+ if named.String() != s {
+ return nil, ErrNameNotCanonical
+ }
+ return named, nil
+}
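+
+// For example, ParseNamed("ubuntu") fails with ErrNameNotCanonical, because its
+// normalized form is "docker.io/library/ubuntu"; only that canonical spelling parses.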
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository{
+ domain: match[1],
+ path: match[2],
+ }, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if canonical, ok := name.(Canonical); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tag,
+ digest: canonical.Digest(),
+ }, nil
+ }
+ return taggedReference{
+ namedRepository: repo,
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if tagged, ok := name.(Tagged); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tagged.Tag(),
+ digest: digest,
+ }, nil
+ }
+ return canonicalReference{
+ namedRepository: repo,
+ digest: digest,
+ }, nil
+}
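+
+// For example (names illustrative only): starting from WithName("example.com/foo"),
+// WithTag(name, "v1") yields "example.com/foo:v1", and WithDigest applied to that
+// tagged value yields "example.com/foo:v1@sha256:…", since the tag is preserved.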
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+ domain, path := SplitHostname(ref)
+ return repository{
+ domain: domain,
+ path: path,
+ }
+}
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.Name() == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ namedRepository: ref.namedRepository,
+ digest: ref.digest,
+ }
+ }
+ return ref.namedRepository
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ namedRepository: ref.namedRepository,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ namedRepository
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository struct {
+ domain string
+ path string
+}
+
+func (r repository) String() string {
+ return r.Name()
+}
+
+func (r repository) Name() string {
+ if r.domain == "" {
+ return r.path
+ }
+ return r.domain + "/" + r.path
+}
+
+func (r repository) Domain() string {
+ return r.domain
+}
+
+func (r repository) Path() string {
+ return r.path
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ namedRepository
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ namedRepository
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go
new file mode 100644
index 000000000..405e995db
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/regexp.go
@@ -0,0 +1,143 @@
+package reference
+
+import "regexp"
+
+var (
+ // alphaNumericRegexp defines the alpha numeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphaNumericRegexp = match(`[a-z0-9]+`)
+
+ // separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, or multiple
+	// dashes.
+ separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+ // nameComponentRegexp restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, or multiple dashes.
+ nameComponentRegexp = expression(
+ alphaNumericRegexp,
+ optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+ // domainComponentRegexp restricts the registry domain component of a
+ // repository name to start with a component as defined by domainRegexp
+ // and followed by an optional port.
+ domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+ // domainRegexp defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ domainRegexp = expression(
+ domainComponentRegexp,
+ optional(repeated(literal(`.`), domainComponentRegexp)),
+ optional(literal(`:`), match(`[0-9]+`)))
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = anchored(TagRegexp)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = anchored(DigestRegexp)
+
+ // NameRegexp is the format for the name component of references. The
+ // regexp has capturing groups for the domain and name part omitting
+ // the separating forward slash from either.
+ NameRegexp = expression(
+ optional(domainRegexp, literal(`/`)),
+ nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp)))
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // domain and trailing components.
+ anchoredNameRegexp = anchored(
+ optional(capture(domainRegexp), literal(`/`)),
+ capture(nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp))))
+
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = anchored(capture(NameRegexp),
+ optional(literal(":"), capture(TagRegexp)),
+ optional(literal("@"), capture(DigestRegexp)))
+
+ // IdentifierRegexp is the format for string identifier used as a
+ // content addressable identifier using sha256. These identifiers
+ // are like digests without the algorithm, since sha256 is used.
+ IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+ // ShortIdentifierRegexp is the format used to represent a prefix
+ // of an identifier. A prefix may be used to match a sha256 identifier
+ // within a list of trusted identifiers.
+ ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+ // anchoredIdentifierRegexp is used to check or match an
+ // identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp)
+
+ // anchoredShortIdentifierRegexp is used to check if a value
+ // is a possible identifier prefix, anchored at start and end
+ // of string.
+ anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+ re := match(regexp.QuoteMeta(s))
+
+ if _, complete := re.LiteralPrefix(); !complete {
+ panic("must be a literal")
+ }
+
+ return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+ var s string
+ for _, re := range res {
+ s += re.String()
+ }
+
+ return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`^` + expression(res...).String() + `$`)
+}
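+
+// As a sanity check of the combinators above: anchored(TagRegexp) compiles to
+// `^[\w][\w.-]{0,127}$`, and ReferenceRegexp contains exactly three capturing
+// groups, so for "docker.io/library/ubuntu:16.04" matches[1] is the full name,
+// matches[2] is "16.04", and matches[3] (the digest) is empty.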
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
new file mode 100644
index 000000000..aab4a6d91
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -0,0 +1,258 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
+// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+type Destination struct {
+ writer io.Writer
+ tar *tar.Writer
+ repoTag string
+ // Other state.
+	blobs map[digest.Digest]types.BlobInfo // already-sent blobs, keyed by digest
+}
+
+// NewDestination returns a tarfile.Destination for the specified io.Writer.
+func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
+ // For github.com/docker/docker consumers, this works just as well as
+ // refString := ref.String()
+ // because when reading the RepoTags strings, github.com/docker/docker/reference
+ // normalizes both of them to the same value.
+ //
+ // Doing it this way to include the normalized-out `docker.io[/library]` does make
+ // a difference for github.com/projectatomic/docker consumers, with the
+ // “Add --add-registry and --block-registry options to docker daemon” patch.
+ // These consumers treat reference strings which include a hostname and reference
+ // strings without a hostname differently.
+ //
+ // Using the host name here is more explicit about the intent, and it has the same
+ // effect as (docker pull) in projectatomic/docker, which tags the result using
+ // a hostname-qualified reference.
+ // See https://github.com/containers/image/issues/72 for a more detailed
+ // analysis and explanation.
+ refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag())
+ return &Destination{
+ writer: dest,
+ tar: tar.NewWriter(dest),
+ repoTag: refString,
+ blobs: make(map[digest.Digest]types.BlobInfo),
+ }
+}
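+
+// A typical (hypothetical) use writes the archive to a file:
+//
+//	f, _ := os.Create("image.tar")       // error handling elided
+//	dest := NewDestination(f, taggedRef)
+//	// ... PutBlob for each layer, then PutManifest ...
+//	_ = dest.Commit()                    // closes the tar writer; the caller still closes f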
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+ return []string{
+ manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures() error {
+ return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *Destination) ShouldCompressLayers() bool {
+ return false
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+ return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ if inputInfo.Digest.String() == "" {
+ return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile")
+ }
+
+ ok, size, err := d.HasBlob(inputInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ if ok {
+ return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ }
+
+ if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
+ logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+ streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer os.Remove(streamCopy.Name())
+ defer streamCopy.Close()
+
+ size, err := io.Copy(streamCopy, stream)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ _, err = streamCopy.Seek(0, os.SEEK_SET)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+ stream = streamCopy
+ logrus.Debugf("... streaming done")
+ }
+
+ digester := digest.Canonical.Digester()
+ tee := io.TeeReader(stream, digester.Hash())
+ if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
+ return types.BlobInfo{}, err
+ }
+ d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
+ return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
+}
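+
+// Note on the unknown-size path above: a caller that cannot pre-compute the blob
+// length (e.g. reading from a pipe) still gets a correct tar entry, at the cost
+// of one temporary copy under /var/tmp; callers that know the size avoid the copy.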
+
+// HasBlob returns true iff the image destination already contains a blob with
+// the matching digest which can be reapplied using ReapplyBlob. Unlike
+// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
+// the blob must also be returned. If the destination does not contain the
+// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
+// returns a non-nil error only on an unexpected failure.
+func (d *Destination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ if info.Digest == "" {
+ return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ }
+ if blob, ok := d.blobs[info.Digest]; ok {
+ return true, blob.Size, nil
+ }
+ return false, -1, nil
+}
+
+// ReapplyBlob informs the image destination that a blob for which HasBlob
+// previously returned true would have been passed to PutBlob if it had
+// returned false. Like HasBlob and unlike PutBlob, the digest can not be
+// empty. If the blob is a filesystem layer, this signifies that the changes
+// it describes need to be applied again when composing a filesystem tree.
+func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return info, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *Destination) PutManifest(m []byte) error {
+ // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
+ // so the caller trying a different manifest kind would be pointless.
+ var man manifest.Schema2
+ if err := json.Unmarshal(m, &man); err != nil {
+ return errors.Wrap(err, "Error parsing manifest")
+ }
+ if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+ return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+ }
+
+ layerPaths := []string{}
+ for _, l := range man.LayersDescriptors {
+ layerPaths = append(layerPaths, l.Digest.String())
+ }
+
+ items := []ManifestItem{{
+ Config: man.ConfigDescriptor.Digest.String(),
+ RepoTags: []string{d.repoTag},
+ Layers: layerPaths,
+ Parent: "",
+ LayerSources: nil,
+ }}
+ itemsBytes, err := json.Marshal(&items)
+ if err != nil {
+ return err
+ }
+
+ // FIXME? Do we also need to support the legacy format?
+ return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
+}
+
+type tarFI struct {
+ path string
+ size int64
+}
+
+func (t *tarFI) Name() string {
+ return t.path
+}
+func (t *tarFI) Size() int64 {
+ return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+ return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+ return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+ return false
+}
+func (t *tarFI) Sys() interface{} {
+ return nil
+}
+
+// sendFile sends a file into the tar stream.
+func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
+ hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+ if err != nil {
+		return err // Propagate the header-construction error instead of swallowing it.
+ }
+ logrus.Debugf("Sending as tar file %s", path)
+ if err := d.tar.WriteHeader(hdr); err != nil {
+ return err
+ }
+ size, err := io.Copy(d.tar, stream)
+ if err != nil {
+ return err
+ }
+ if size != expectedSize {
+ return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+ }
+ return nil
+}
+
+// PutSignatures adds the given signatures to the docker tarfile (currently not
+// supported). MUST be called after PutManifest (signatures reference manifest
+// contents)
+func (d *Destination) PutSignatures(signatures [][]byte) error {
+ if len(signatures) != 0 {
+ return errors.Errorf("Storing signatures for docker tar files is not supported")
+ }
+ return nil
+}
+
+// Commit finishes writing data to the underlying io.Writer.
+// It is the caller's responsibility to close it, if necessary.
+func (d *Destination) Commit() error {
+ return d.tar.Close()
+}
diff --git a/vendor/github.com/containers/image/docker/tarfile/doc.go b/vendor/github.com/containers/image/docker/tarfile/doc.go
new file mode 100644
index 000000000..4ea5369c0
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/doc.go
@@ -0,0 +1,3 @@
+// Package tarfile is an internal implementation detail of some transports.
+// Do not use outside of the github.com/containers/image repo!
+package tarfile
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
new file mode 100644
index 000000000..34d5ff32a
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -0,0 +1,360 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/compression"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Source is a partial implementation of types.ImageSource for reading from tarPath.
+type Source struct {
+ tarPath string
+ // The following data is only available after ensureCachedDataIsPresent() succeeds
+ tarManifest *ManifestItem // nil if not available yet.
+ configBytes []byte
+ configDigest digest.Digest
+ orderedDiffIDList []diffID
+ knownLayers map[diffID]*layerInfo
+ // Other state
+ generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+}
+
+type layerInfo struct {
+ path string
+ size int64
+}
+
+// NewSource returns a tarfile.Source for the specified path.
+func NewSource(path string) *Source {
+ // TODO: We could add support for multiple images in a single archive, so
+ // that people could use docker-archive:opensuse.tar:opensuse:leap as
+ // the source of an image.
+ return &Source{
+ tarPath: path,
+ }
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+ *tar.Reader
+ backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+ return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// The caller should call .Close() on the returned stream.
+func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
+ f, err := os.Open(s.tarPath)
+ if err != nil {
+ return nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ f.Close()
+ }
+ }()
+
+ tarReader, header, err := findTarComponent(f, componentPath)
+ if err != nil {
+ return nil, err
+ }
+ if header == nil {
+ return nil, os.ErrNotExist
+ }
+ if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+ // We follow only one symlink; so no loops are possible.
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+ // so we don't care.
+ tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+ if err != nil {
+ return nil, err
+ }
+ if header == nil {
+ return nil, os.ErrNotExist
+ }
+ }
+
+ if !header.FileInfo().Mode().IsRegular() {
+ return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+ }
+ succeeded = true
+ return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
+
+// findTarComponent returns a header and a reader matching path within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
+ t := tar.NewReader(inputFile)
+ for {
+ h, err := t.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ if h.Name == path {
+ return t, h, nil
+ }
+ }
+ return nil, nil, nil
+}
+
+// readTarComponent returns full contents of componentPath.
+func (s *Source) readTarComponent(path string) ([]byte, error) {
+ file, err := s.openTarComponent(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error loading tar component %s", path)
+ }
+ defer file.Close()
+ bytes, err := ioutil.ReadAll(file)
+ if err != nil {
+ return nil, err
+ }
+ return bytes, nil
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+func (s *Source) ensureCachedDataIsPresent() error {
+ if s.tarManifest != nil {
+ return nil
+ }
+
+ // Read and parse manifest.json
+ tarManifest, err := s.loadTarManifest()
+ if err != nil {
+ return err
+ }
+
+ // Check to make sure length is 1
+ if len(tarManifest) != 1 {
+ return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
+ }
+
+ // Read and parse config.
+ configBytes, err := s.readTarComponent(tarManifest[0].Config)
+ if err != nil {
+ return err
+ }
+	var parsedConfig image // Most fields omitted, we only care about layer DiffIDs.
+ if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+ return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
+ }
+
+ knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
+ if err != nil {
+ return err
+ }
+
+ // Success; commit.
+ s.tarManifest = &tarManifest[0]
+ s.configBytes = configBytes
+ s.configDigest = digest.FromBytes(configBytes)
+ s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+ s.knownLayers = knownLayers
+ return nil
+}
+
+// loadTarManifest loads and decodes the manifest.json.
+func (s *Source) loadTarManifest() ([]ManifestItem, error) {
+ // FIXME? Do we need to deal with the legacy format?
+ bytes, err := s.readTarComponent(manifestFileName)
+ if err != nil {
+ return nil, err
+ }
+ var items []ManifestItem
+ if err := json.Unmarshal(bytes, &items); err != nil {
+ return nil, errors.Wrap(err, "Error decoding tar manifest.json")
+ }
+ return items, nil
+}
+
+// LoadTarManifest loads and decodes the manifest.json
+func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
+ return s.loadTarManifest()
+}
+
+func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
+ // Collect layer data available in manifest and config.
+ if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
+ return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+ }
+ knownLayers := map[diffID]*layerInfo{}
+ unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
+ for i, diffID := range parsedConfig.RootFS.DiffIDs {
+ if _, ok := knownLayers[diffID]; ok {
+ // Apparently it really can happen that a single image contains the same layer diff more than once.
+ // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
+ // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
+ continue
+ }
+ layerPath := tarManifest.Layers[i]
+ if _, ok := unknownLayerSizes[layerPath]; ok {
+ return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+ }
+ li := &layerInfo{ // A new element in each iteration
+ path: layerPath,
+ size: -1,
+ }
+ knownLayers[diffID] = li
+ unknownLayerSizes[layerPath] = li
+ }
+
+ // Scan the tar file to collect layer sizes.
+ file, err := os.Open(s.tarPath)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ t := tar.NewReader(file)
+ for {
+ h, err := t.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ if li, ok := unknownLayerSizes[h.Name]; ok {
+ li.size = h.Size
+ delete(unknownLayerSizes, h.Name)
+ }
+ }
+ if len(unknownLayerSizes) != 0 {
+		return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with better error reporting if it ever happened in practice.
+ }
+
+ return knownLayers, nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *Source) GetManifest() ([]byte, string, error) {
+ if s.generatedManifest == nil {
+ if err := s.ensureCachedDataIsPresent(); err != nil {
+ return nil, "", err
+ }
+ m := manifest.Schema2{
+ SchemaVersion: 2,
+ MediaType: manifest.DockerV2Schema2MediaType,
+ ConfigDescriptor: manifest.Schema2Descriptor{
+ MediaType: manifest.DockerV2Schema2ConfigMediaType,
+ Size: int64(len(s.configBytes)),
+ Digest: s.configDigest,
+ },
+ LayersDescriptors: []manifest.Schema2Descriptor{},
+ }
+ for _, diffID := range s.orderedDiffIDList {
+ li, ok := s.knownLayers[diffID]
+ if !ok {
+ return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+ }
+ m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+ Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
+ MediaType: manifest.DockerV2Schema2LayerMediaType,
+ Size: li.size,
+ })
+ }
+ manifestBytes, err := json.Marshal(&m)
+ if err != nil {
+ return nil, "", err
+ }
+ s.generatedManifest = manifestBytes
+ }
+ return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
+}
+
+// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
+// out of a manifest list.
+func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
+ return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+}
+
+type readCloseWrapper struct {
+ io.Reader
+ closeFunc func() error
+}
+
+func (r readCloseWrapper) Close() error {
+ if r.closeFunc != nil {
+ return r.closeFunc()
+ }
+ return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ if err := s.ensureCachedDataIsPresent(); err != nil {
+ return nil, 0, err
+ }
+
+ if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
+ return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+ }
+
+ if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
+ stream, err := s.openTarComponent(li.path)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ // In order to handle the fact that digests != diffIDs (and thus that a
+ // caller which is trying to verify the blob will run into problems),
+ // we need to decompress blobs. This is a bit ugly, but it's a
+ // consequence of making everything addressable by their DiffID rather
+ // than by their digest...
+ //
+ // In particular, because the v2s2 manifest being generated uses
+ // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
+ // layers not their _actual_ digest. The result is that copy/... will
+	// be verifying a "digest" which is not the actual layer's digest (but
+ // is instead the DiffID).
+
+ decompressFunc, reader, err := compression.DetectCompression(stream)
+ if err != nil {
+ return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
+ }
+
+ if decompressFunc != nil {
+ reader, err = decompressFunc(reader)
+ if err != nil {
+ return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
+ }
+ }
+
+ newStream := readCloseWrapper{
+ Reader: reader,
+ closeFunc: stream.Close,
+ }
+
+ return newStream, li.size, nil
+ }
+
+ return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) {
+ return [][]byte{}, nil
+}
diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go
new file mode 100644
index 000000000..4780d66c6
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/types.go
@@ -0,0 +1,40 @@
+package tarfile
+
+import (
+ "github.com/containers/image/manifest"
+ "github.com/opencontainers/go-digest"
+)
+
+// Various data structures.
+
+// Based on github.com/docker/docker/image/tarexport/tarexport.go
+const (
+ manifestFileName = "manifest.json"
+ // legacyLayerFileName = "layer.tar"
+ // legacyConfigFileName = "json"
+ // legacyVersionFileName = "VERSION"
+ // legacyRepositoriesFileName = "repositories"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
+type ManifestItem struct {
+ Config string
+ RepoTags []string
+ Layers []string
+ Parent imageID `json:",omitempty"`
+ LayerSources map[diffID]manifest.Schema2Descriptor `json:",omitempty"`
+}
+
+type imageID string
+type diffID digest.Digest
+
+// Based on github.com/docker/docker/image/image.go
+// MOST CONTENT OMITTED AS UNNECESSARY
+type image struct {
+ RootFS *rootFS `json:"rootfs,omitempty"`
+}
+
+type rootFS struct {
+ Type string `json:"type"`
+ DiffIDs []diffID `json:"diff_ids,omitempty"`
+}
diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate.go b/vendor/github.com/containers/image/docker/wwwauthenticate.go
new file mode 100644
index 000000000..23664a74a
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/wwwauthenticate.go
@@ -0,0 +1,159 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+ "net/http"
+ "strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+ // Scheme is the auth-scheme according to RFC 7235
+ Scheme string
+
+ // Parameters are the auth-params according to RFC 7235
+ Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+ challenges := []challenge{}
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ if v != "" {
+ challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+ }
+ }
+ return challenges
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have…
+func parseValueAndParams(header string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s := expectToken(header)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ s = "," + skipSpace(s)
+ for strings.HasPrefix(s, ",") {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s[1:]))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return
+ }
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ }
+ return
+}
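+
+// For example (host names illustrative), a header value of
+//	Bearer realm="https://auth.example.com/token",service="registry.example.com"
+// parses to value "bearer" with params
+//	{"realm": "https://auth.example.com/token", "service": "registry.example.com"};
+// the scheme and parameter keys are lower-cased, and parameter values are unquoted.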
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go
new file mode 100644
index 000000000..4b152d261
--- /dev/null
+++ b/vendor/github.com/containers/image/image/docker_list.go
@@ -0,0 +1,63 @@
+package image
+
+import (
+ "encoding/json"
+ "runtime"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type platformSpec struct {
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+ Variant string `json:"variant,omitempty"`
+ Features []string `json:"features,omitempty"` // removed in OCI
+}
+
+// A manifestDescriptor references a platform-specific manifest.
+type manifestDescriptor struct {
+ manifest.Schema2Descriptor
+ Platform platformSpec `json:"platform"`
+}
+
+type manifestList struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ Manifests []manifestDescriptor `json:"manifests"`
+}
+
+func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) {
+ list := manifestList{}
+ if err := json.Unmarshal(manblob, &list); err != nil {
+ return nil, err
+ }
+ var targetManifestDigest digest.Digest
+ for _, d := range list.Manifests {
+ if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS {
+ targetManifestDigest = d.Digest
+ break
+ }
+ }
+ if targetManifestDigest == "" {
+ return nil, errors.New("no supported platform found in manifest list")
+ }
+ manblob, mt, err := src.GetTargetManifest(targetManifestDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(src, manblob, mt)
+}
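+
+// chooseInstance is an illustrative sketch added for this writeup, not part of
+// the vendored source: it restates the selection rule used above for clarity.
+// It returns the digest of the first manifest entry matching the given
+// platform, or "" if none matches.
+func chooseInstance(list manifestList, arch, os string) digest.Digest {
+	for _, d := range list.Manifests {
+		if d.Platform.Architecture == arch && d.Platform.OS == os {
+			return d.Digest
+		}
+	}
+	return ""
+}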
diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go
new file mode 100644
index 000000000..86e30b3e0
--- /dev/null
+++ b/vendor/github.com/containers/image/image/docker_schema1.go
@@ -0,0 +1,245 @@
+package image
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+type manifestSchema1 struct {
+ m *manifest.Schema1
+}
+
+func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema1{m: m}, nil
+}
+
+// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
+ return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
+}
+
+func (m *manifestSchema1) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestSchema1) manifestMIMEType() string {
+ return manifest.DockerV2Schema1SignedMediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema1) ConfigBlob() ([]byte, error) {
+ return nil, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
+ v2s2, err := m.convertToManifestSchema2(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return v2s2.OCIConfig()
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
+ return m.m.LayerInfos()
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ // This is a bit convoluted: We can’t just have a "get embedded docker reference" method
+ // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually
+ // embed a full docker/distribution reference, but only the repo name and tag (without the host name).
+ // So we would have to provide a “return repo without host name, and tag” getter for the generic code,
+ // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the
+ // generic copy code needs to know about is reference.Named and that a manifest may need updating
+ // for some destinations.
+ name := reference.Path(ref)
+ var tag string
+ if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+ tag = tagged.Tag()
+ } else {
+ tag = ""
+ }
+ return m.m.Name != name || m.m.Tag != tag
+}
+
+func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
+ v1 := &v1Image{}
+ if err := json.Unmarshal([]byte(m.m.History[0].V1Compatibility), v1); err != nil {
+ return nil, err
+ }
+ i := &types.ImageInspectInfo{
+ Tag: m.m.Tag,
+ DockerVersion: v1.DockerVersion,
+ Created: v1.Created,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ }
+ if v1.Config != nil {
+ i.Labels = v1.Config.Labels
+ }
+ return i, nil
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return options.ManifestMIMEType == manifest.DockerV2Schema2MediaType
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ if options.EmbeddedDockerReference != nil {
+ copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+ if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
+ copy.m.Tag = tagged.Tag()
+ } else {
+ copy.m.Tag = ""
+ }
+ }
+
+ switch options.ManifestMIMEType {
+ case "": // No conversion, OK
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so,
+ // handle conversions between them by doing nothing.
+ case manifest.DockerV2Schema2MediaType:
+ return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+ default:
+ return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
+ }
+
+ return memoryImageFromManifest(&copy), nil
+}
+
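+// exampleConvertToSchema2 is an illustrative sketch added for this writeup,
+// not part of the vendored source: converting schema1 to schema2 via
+// UpdatedImage requires the caller to supply the DiffIDs flagged by
+// UpdatedImageNeedsLayerDiffIDs above. diffIDs is an assumed, caller-computed
+// slice with one entry per fsLayer.
+func exampleConvertToSchema2(img types.Image, diffIDs []digest.Digest) (types.Image, error) {
+	options := types.ManifestUpdateOptions{
+		ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+	}
+	options.InformationOnly.LayerDiffIDs = diffIDs
+	return img.UpdatedImage(options)
+}
+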
+// Based on github.com/docker/docker/distribution/pull_v2.go
+func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
+ if len(m.m.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
+ }
+ if len(m.m.History) != len(m.m.FSLayers) {
+ return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
+ }
+ if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
+ return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+ }
+ if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
+ return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+ }
+
+ rootFS := rootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{},
+ BaseLayer: "",
+ }
+ var layers []manifest.Schema2Descriptor
+ history := make([]imageHistory, len(m.m.History))
+ for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
+ v2Index := (len(m.m.History) - 1) - v1Index
+
+ var v1compat manifest.Schema1V1Compatibility
+ if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
+ return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
+ }
+ history[v2Index] = imageHistory{
+ Created: v1compat.Created,
+ Author: v1compat.Author,
+ CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "),
+ Comment: v1compat.Comment,
+ EmptyLayer: v1compat.ThrowAway,
+ }
+
+ if !v1compat.ThrowAway {
+ var size int64
+ if uploadedLayerInfos != nil {
+ size = uploadedLayerInfos[v2Index].Size
+ }
+ var d digest.Digest
+ if layerDiffIDs != nil {
+ d = layerDiffIDs[v2Index]
+ }
+ layers = append(layers, manifest.Schema2Descriptor{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: size,
+ Digest: m.m.FSLayers[v1Index].BlobSum,
+ })
+ rootFS.DiffIDs = append(rootFS.DiffIDs, d)
+ }
+ }
+ configJSON, err := configJSONFromV1Config([]byte(m.m.History[0].V1Compatibility), rootFS, history)
+ if err != nil {
+ return nil, err
+ }
+ configDescriptor := manifest.Schema2Descriptor{
+ MediaType: "application/vnd.docker.container.image.v1+json",
+ Size: int64(len(configJSON)),
+ Digest: digest.FromBytes(configJSON),
+ }
+
+ m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers)
+ return memoryImageFromManifest(m2), nil
+}
+
+func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) {
+ // github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields;
+ // we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be
+ // a consistently reproducible value.
+
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+
+ delete(rawContents, "id")
+ delete(rawContents, "parent")
+ delete(rawContents, "Size")
+ delete(rawContents, "parent_id")
+ delete(rawContents, "layer_id")
+ delete(rawContents, "throwaway")
+
+ updates := map[string]interface{}{"rootfs": rootFS, "history": history}
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
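+
+// patchJSONField is an illustrative sketch added for this writeup, not part of
+// the vendored source: it shows the field-preserving technique used above in
+// isolation. Unmarshal into map[string]*json.RawMessage, patch selected keys,
+// re-marshal; unknown fields survive the round trip untouched.
+func patchJSONField(blob []byte, field string, value interface{}) ([]byte, error) {
+	raw := map[string]*json.RawMessage{}
+	if err := json.Unmarshal(blob, &raw); err != nil {
+		return nil, err
+	}
+	encoded, err := json.Marshal(value)
+	if err != nil {
+		return nil, err
+	}
+	raw[field] = (*json.RawMessage)(&encoded)
+	return json.Marshal(raw)
+}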
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
new file mode 100644
index 000000000..7ccd061c0
--- /dev/null
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -0,0 +1,353 @@
+package image
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "io/ioutil"
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+var gzippedEmptyLayer = []byte{
+ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
+ 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
+}
+
+// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
+const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
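+
+// verifyGzippedEmptyLayerDigest is an illustrative sketch added for this
+// writeup, not part of the vendored source: the constant above can be
+// cross-checked against the blob at runtime with the same go-digest helper
+// used for config blobs elsewhere in this package.
+func verifyGzippedEmptyLayerDigest() bool {
+	return digest.FromBytes(gzippedEmptyLayer) == gzippedEmptyLayerDigest
+}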
+
+type manifestSchema2 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ m *manifest.Schema2
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema2FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema2{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data.
+func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
+ return &manifestSchema2{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.Schema2FromComponents(config, layers),
+ }
+}
+
+func (m *manifestSchema2) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestSchema2) manifestMIMEType() string {
+ return m.m.MediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig() (*imgspecv1.Image, error) {
+ configBlob, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+ // than OCI v1. This unmarshal makes sure we drop docker v2s2
+ // fields that aren't needed in OCI v1.
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(configBlob, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+ }
+ stream, _, err := m.src.GetBlob(types.BlobInfo{
+ Digest: m.m.ConfigDescriptor.Digest,
+ Size: m.m.ConfigDescriptor.Size,
+ URLs: m.m.ConfigDescriptor.URLs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.ConfigDescriptor.Digest {
+			return nil, errors.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+ return m.m.LayerInfos()
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) {
+ config, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ v1 := &v1Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
+ i := &types.ImageInspectInfo{
+ DockerVersion: v1.DockerVersion,
+ Created: v1.Created,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ }
+ if v1.Config != nil {
+ i.Labels = v1.Config.Labels
+ }
+ return i, nil
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.Schema2Clone(m.m),
+ }
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+ switch options.ManifestMIMEType {
+ case "": // No conversion, OK
+ case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
+ return copy.convertToManifestSchema1(options.InformationOnly.Destination)
+ case imgspecv1.MediaTypeImageManifest:
+ return copy.convertToManifestOCI1()
+ default:
+ return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
+ }
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+ return imgspecv1.Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
+ configOCI, err := m.OCIConfig()
+ if err != nil {
+ return nil, err
+ }
+ configOCIBytes, err := json.Marshal(configOCI)
+ if err != nil {
+ return nil, err
+ }
+
+ config := imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
+ }
+
+ layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+ for idx := range layers {
+ layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+ if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+ } else {
+ // we assume layers are gzip'ed because docker v2s2 only deals with
+ // gzip'ed layers. However, OCI has non-gzip'ed layers as well.
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+ }
+ }
+
+ m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers)
+ return memoryImageFromManifest(m1), nil
+}
+
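+// For reference, an editor note rather than vendored source: the media-type
+// mapping applied in convertToManifestOCI1 above is
+//
+//	docker v2s2 config        -> application/vnd.oci.image.config.v1+json (imgspecv1.MediaTypeImageConfig)
+//	docker v2s2 gzipped layer -> application/vnd.oci.image.layer.v1.tar+gzip (imgspecv1.MediaTypeImageLayerGzip)
+//	docker v2s2 foreign layer -> application/vnd.oci.image.layer.nondistributable.v1.tar (imgspecv1.MediaTypeImageLayerNonDistributable)
+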
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) (types.Image, error) {
+ configBytes, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ imageConfig := &image{}
+ if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+ return nil, err
+ }
+
+ // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
+ fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+ history := make([]manifest.Schema1History, len(imageConfig.History))
+ nonemptyLayerIndex := 0
+ var parentV1ID string // Set in the loop
+ v1ID := ""
+ haveGzippedEmptyLayer := false
+ if len(imageConfig.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+ }
+ for v2Index, historyEntry := range imageConfig.History {
+ parentV1ID = v1ID
+ v1Index := len(imageConfig.History) - 1 - v2Index
+
+ var blobDigest digest.Digest
+ if historyEntry.EmptyLayer {
+ if !haveGzippedEmptyLayer {
+ logrus.Debugf("Uploading empty layer during conversion to schema 1")
+ info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))})
+ if err != nil {
+ return nil, errors.Wrap(err, "Error uploading empty layer")
+ }
+ if info.Digest != gzippedEmptyLayerDigest {
+ return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
+ }
+ haveGzippedEmptyLayer = true
+ }
+ blobDigest = gzippedEmptyLayerDigest
+ } else {
+ if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
+ return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+ }
+ blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
+ nonemptyLayerIndex++
+ }
+
+ // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
+ if err != nil {
+ return nil, err
+ }
+ v1ID = v
+
+ fakeImage := manifest.Schema1V1Compatibility{
+ ID: v1ID,
+ Parent: parentV1ID,
+ Comment: historyEntry.Comment,
+ Created: historyEntry.Created,
+ Author: historyEntry.Author,
+ ThrowAway: historyEntry.EmptyLayer,
+ }
+ fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
+ v1CompatibilityBytes, err := json.Marshal(&fakeImage)
+ if err != nil {
+ return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+ }
+
+ fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
+ history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
+ // Note that parentV1ID of the top layer is preserved when exiting this loop
+ }
+
+ // Now patch in real configuration for the top layer (v1Index == 0)
+ v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
+ if err != nil {
+ return nil, err
+ }
+ v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
+ if err != nil {
+ return nil, err
+ }
+ history[0].V1Compatibility = string(v1Config)
+
+ m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+ return memoryImageFromManifest(m1), nil
+}
+
+func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
+ if err := blobDigest.Validate(); err != nil {
+ return "", err
+ }
+ parts := append([]string{blobDigest.Hex()}, others...)
+ v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
+ return hex.EncodeToString(v1IDHash[:]), nil
+}
+
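+// exampleV1ID is an illustrative sketch added for this writeup, not part of
+// the vendored source: v1 IDs are just hex-encoded SHA-256 over the
+// space-joined components. The digest and parent ID below are assumed
+// placeholders, used only to show the shape of the computation.
+func exampleV1ID() (string, error) {
+	blob := digest.Digest("sha256:" + strings.Repeat("0", 64)) // placeholder digest
+	return v1IDFromBlobDigestAndComponents(blob, "assumedParentID")
+}
+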
+func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+ delete(rawContents, "rootfs")
+ delete(rawContents, "history")
+
+ updates := map[string]interface{}{"id": v1ID}
+ if parentV1ID != "" {
+ updates["parent"] = parentV1ID
+ }
+ if throwaway {
+ updates["throwaway"] = throwaway
+ }
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go
new file mode 100644
index 000000000..4a79ac277
--- /dev/null
+++ b/vendor/github.com/containers/image/image/manifest.go
@@ -0,0 +1,118 @@
+package image
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/strslice"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type config struct {
+ Cmd strslice.StrSlice
+ Labels map[string]string
+}
+
+type v1Image struct {
+ ID string `json:"id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig *config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *config `json:"config,omitempty"`
+	// Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+}
+
+type image struct {
+ v1Image
+ History []imageHistory `json:"history,omitempty"`
+ RootFS *rootFS `json:"rootfs,omitempty"`
+}
+
+type imageHistory struct {
+ Created time.Time `json:"created"`
+ Author string `json:"author,omitempty"`
+ CreatedBy string `json:"created_by,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+type rootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+ BaseLayer string `json:"base_layer,omitempty"`
+}
+
+// genericManifest is an interface for parsing, modifying image manifests and related data.
+// Note that the public methods are intended to be a subset of types.Image
+// so that embedding a genericManifest into structs works.
+// (Support for v1 image manifests may be added one day...)
+type genericManifest interface {
+ serialize() ([]byte, error)
+ manifestMIMEType() string
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() types.BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob() ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig() (*imgspecv1.Image, error)
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []types.BlobInfo
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ imageInspectInfo() (*types.ImageInspectInfo, error) // To be called by inspectManifest
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // This does not change the state of the original Image object.
+ UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error)
+}
+
+func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+ switch manifest.NormalizedMIMEType(mt) {
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+ return manifestSchema1FromManifest(manblob)
+ case imgspecv1.MediaTypeImageManifest:
+ return manifestOCI1FromManifest(src, manblob)
+ case manifest.DockerV2Schema2MediaType:
+ return manifestSchema2FromManifest(src, manblob)
+ case manifest.DockerV2ListMediaType:
+ return manifestSchema2FromManifestList(src, manblob)
+ default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+ }
+}
+
+// inspectManifest is an implementation of types.Image.Inspect
+func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) {
+ info, err := m.imageInspectInfo()
+ if err != nil {
+ return nil, err
+ }
+ layers := m.LayerInfos()
+ info.Layers = make([]string, len(layers))
+ for i, layer := range layers {
+ info.Layers[i] = layer.Digest.String()
+ }
+ return info, nil
+}
diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go
new file mode 100644
index 000000000..1e8bb3a4a
--- /dev/null
+++ b/vendor/github.com/containers/image/image/memory.go
@@ -0,0 +1,80 @@
+package image
+
+import (
+ "context"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/types"
+)
+
+// memoryImage is a mostly-complete implementation of types.Image assembled from data
+// created in memory, used primarily as a return value of types.Image.UpdatedImage
+// as a way to carry various structured information in a type-safe and easy-to-use way.
+// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone
+// collection of all related information, e.g. there is no way to get layer blobs
+// from a memoryImage.
+type memoryImage struct {
+ genericManifest
+ serializedManifest []byte // A private cache for Manifest()
+}
+
+func memoryImageFromManifest(m genericManifest) types.Image {
+ return &memoryImage{
+ genericManifest: m,
+ serializedManifest: nil,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *memoryImage) Reference() types.ImageReference {
+ // It would really be inappropriate to return the ImageReference of the image this was based on.
+ return nil
+}
+
+// Close removes resources associated with an initialized UnparsedImage, if any.
+func (i *memoryImage) Close() error {
+ return nil
+}
+
+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Manifest() ([]byte, string, error) {
+ if i.serializedManifest == nil {
+ m, err := i.genericManifest.serialize()
+ if err != nil {
+ return nil, "", err
+ }
+ i.serializedManifest = m
+ }
+ return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // Modifying an image invalidates signatures; a caller asking the updated image for signatures
+ // is probably confused.
+ return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) {
+ return inspectManifest(i.genericManifest)
+}
+
+// IsMultiImage returns true if the image's manifest is a list of images, false otherwise.
+func (i *memoryImage) IsMultiImage() bool {
+ return false
+}
+
+// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *memoryImage) UpdatedLayerInfos() []types.BlobInfo {
+ return i.LayerInfos()
+}
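+
+// exampleMemoryRoundTrip is an illustrative sketch added for this writeup, not
+// part of the vendored source: a memoryImage is what the Updated*/convert*
+// paths above hand back. The first Manifest() call serializes and caches the
+// blob; later calls reuse the cached bytes.
+func exampleMemoryRoundTrip(gm genericManifest) ([]byte, string, error) {
+	img := memoryImageFromManifest(gm)
+	return img.Manifest() // serialized manifest blob, its MIME type, error
+}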
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
new file mode 100644
index 000000000..77ddedae8
--- /dev/null
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -0,0 +1,195 @@
+package image
+
+import (
+ "encoding/json"
+ "io/ioutil"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+type manifestOCI1 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of m.Config.
+ m *manifest.OCI1
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestOCI1{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data.
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+ return &manifestOCI1{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.OCI1FromComponents(config, layers),
+ }
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+ return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+ }
+ stream, _, err := m.src.GetBlob(types.BlobInfo{
+ Digest: m.m.Config.Digest,
+ Size: m.m.Config.Size,
+ URLs: m.m.Config.URLs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.Config.Digest {
+			return nil, errors.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
+ cb, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(cb, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+ return m.m.LayerInfos()
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) {
+ config, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ v1 := &v1Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
+ i := &types.ImageInspectInfo{
+ DockerVersion: v1.DockerVersion,
+ Created: v1.Created,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ }
+ if v1.Config != nil {
+ i.Labels = v1.Config.Labels
+ }
+ return i, nil
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.OCI1Clone(m.m),
+ }
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+ switch options.ManifestMIMEType {
+ case "": // No conversion, OK
+ case manifest.DockerV2Schema2MediaType:
+ return copy.convertToManifestSchema2()
+ default:
+ return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
+ }
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+ return manifest.Schema2Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
+ // Create a copy of the descriptor.
+ config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+
+ // The only difference between OCI and DockerSchema2 is the mediatypes. The
+ // media type of the manifest is handled by manifestSchema2FromComponents.
+ config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+ layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+ for idx := range layers {
+ layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+ layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+ }
+
+ // Rather than copying the ConfigBlob now, we just pass m.src to the
+	// translated manifest: since the only difference is the mediatype of
+	// the descriptors, there is no change to any blob stored in m.src.
+ m1 := manifestSchema2FromComponents(config, m.src, nil, layers)
+ return memoryImageFromManifest(m1), nil
+}
diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go
new file mode 100644
index 000000000..1293f7d30
--- /dev/null
+++ b/vendor/github.com/containers/image/image/sourced.go
@@ -0,0 +1,94 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+)
+
+// FromSource returns a types.Image implementation for source.
+// The caller must call .Close() on the returned Image.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// keep a reference to the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromSource(src types.ImageSource) (types.Image, error) {
+ return FromUnparsedImage(UnparsedFromSource(src))
+}
+
+// sourcedImage is a general set of utilities for working with container images,
+// whatever their underlying location may be (i.e. dockerImageSource-independent).
+// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
+// may not be a `sourcedImage` directly. However, most users of `types.Image`
+// do not care, and those who care about `skopeo/docker.Image` know they do.
+type sourcedImage struct {
+ *UnparsedImage
+ manifestBlob []byte
+ manifestMIMEType string
+ // genericManifest contains data corresponding to manifestBlob.
+ // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+ // if you want to preserve the original manifest; use manifestBlob directly.
+ genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// The caller must call .Close() on the returned Image.
+//
+// FromUnparsedImage “takes ownership” of the input UnparsedImage and will call unparsed.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to
+// keep a reference to the Image.)
+func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
+ // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
+ // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
+ // this is the only UnparsedImage implementation around, anyway.
+
+ // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to
+ // unparsed.Close.
+
+ // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
+ manifestBlob, manifestMIMEType, err := unparsed.Manifest()
+ if err != nil {
+ return nil, err
+ }
+
+ parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &sourcedImage{
+ UnparsedImage: unparsed,
+ manifestBlob: manifestBlob,
+ manifestMIMEType: manifestMIMEType,
+ genericManifest: parsedManifest,
+ }, nil
+}
+
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *sourcedImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
+func (i *sourcedImage) Manifest() ([]byte, string, error) {
+ return i.manifestBlob, i.manifestMIMEType, nil
+}
+
+func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) {
+ return inspectManifest(i.genericManifest)
+}
+
+func (i *sourcedImage) IsMultiImage() bool {
+ return i.manifestMIMEType == manifest.DockerV2ListMediaType
+}
+
+func (i *sourcedImage) UpdatedLayerInfos() []types.BlobInfo {
+ return i.UnparsedImage.UpdatedLayerInfos()
+}
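+
+// exampleInspect is an illustrative sketch added for this writeup, not part of
+// the vendored source: the typical call sequence, assuming the caller has
+// already built a types.ImageSource from some transport.
+func exampleInspect(src types.ImageSource) (*types.ImageInspectInfo, error) {
+	img, err := FromSource(src) // img now owns src and will close it
+	if err != nil {
+		return nil, err
+	}
+	defer img.Close()
+	return img.Inspect()
+}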
diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go
new file mode 100644
index 000000000..7bcac06e0
--- /dev/null
+++ b/vendor/github.com/containers/image/image/unparsed.go
@@ -0,0 +1,92 @@
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+type UnparsedImage struct {
+ src types.ImageSource
+ cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+ // A private cache for Manifest(), may be the empty string if guessing failed.
+ // Valid iff cachedManifest is not nil.
+ cachedManifestMIMEType string
+ cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedFromSource returns a types.UnparsedImage implementation for source.
+// The caller must call .Close() on the returned UnparsedImage.
+//
+// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to
+// keep a reference to the UnparsedImage.)
+func UnparsedFromSource(src types.ImageSource) *UnparsedImage {
+ return &UnparsedImage{src: src}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+ return i.src.Reference()
+}
+
+// Close removes resources associated with an initialized UnparsedImage, if any.
+func (i *UnparsedImage) Close() error {
+ return i.src.Close()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest() ([]byte, string, error) {
+ if i.cachedManifest == nil {
+ m, mt, err := i.src.GetManifest()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // ImageSource.GetManifest does not do digest verification, but we do;
+ // this immediately protects also any user of types.Image.
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ digest := digest.Digest(canonical.Digest())
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+ }
+ }
+ }
+
+ i.cachedManifest = m
+ i.cachedManifestMIMEType = mt
+ }
+ return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ if i.cachedSignatures == nil {
+ sigs, err := i.src.GetSignatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ i.cachedSignatures = sigs
+ }
+ return i.cachedSignatures, nil
+}
+
+// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *UnparsedImage) UpdatedLayerInfos() []types.BlobInfo {
+ return i.src.UpdatedLayerInfos()
+}
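+
+// exampleVerifiedManifest is an illustrative sketch added for this writeup,
+// not part of the vendored source: when the user-supplied reference is
+// digest-pinned (reference.Canonical), Manifest() above rejects any blob that
+// does not hash to that digest, so callers get verification for free.
+func exampleVerifiedManifest(src types.ImageSource) ([]byte, error) {
+	unparsed := UnparsedFromSource(src)
+	defer unparsed.Close()
+	blob, _, err := unparsed.Manifest() // digest-checked iff the reference pins one
+	return blob, err
+}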
diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go
new file mode 100644
index 000000000..f4ce73207
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/docker_schema1.go
@@ -0,0 +1,212 @@
+package manifest
+
+import (
+ "encoding/json"
+ "regexp"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
+type Schema1FSLayers struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// Schema1History is an entry of the "history" array in docker/distribution schema 1.
+type Schema1History struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Schema1 is a manifest in docker/distribution schema 1.
+type Schema1 struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Architecture string `json:"architecture"`
+ FSLayers []Schema1FSLayers `json:"fsLayers"`
+ History []Schema1History `json:"history"`
+ SchemaVersion int `json:"schemaVersion"`
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig struct {
+ Cmd []string
+ } `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessary a literal representation of the original blob,
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+ s1 := Schema1{}
+ if err := json.Unmarshal(manifest, &s1); err != nil {
+ return nil, err
+ }
+ if s1.SchemaVersion != 1 {
+ return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ }
+ if len(s1.FSLayers) != len(s1.History) {
+ return nil, errors.New("length of history not equal to number of layers")
+ }
+ if len(s1.FSLayers) == 0 {
+ return nil, errors.New("no FSLayers in manifest")
+ }
+ if err := s1.fixManifestLayers(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 {
+ var name, tag string
+ if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref)
+ if tagged, ok := ref.(reference.NamedTagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ return &Schema1{
+ Name: name,
+ Tag: tag,
+ Architecture: architecture,
+ FSLayers: fsLayers,
+ History: history,
+ SchemaVersion: 1,
+ }
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+ copy := *src
+ return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []types.BlobInfo {
+ layers := make([]types.BlobInfo, len(m.FSLayers))
+ for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+ layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
+ }
+ return layers
+}
+
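+// Editor note, not vendored source: schema1 stores fsLayers newest-first, so
+// the index reversal above makes LayerInfos()[0] the base layer. For an
+// assumed two-layer manifest:
+//
+//	m.FSLayers[0].BlobSum (top layer)  -> LayerInfos()[1]
+//	m.FSLayers[1].BlobSum (base layer) -> LayerInfos()[0]
+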
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+ if len(m.FSLayers) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ }
+ for i, info := range layerInfos {
+ // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+ // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+ // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+ m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema1) Serialize() ([]byte, error) {
+ // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+ unsigned, err := json.Marshal(*m)
+ if err != nil {
+ return nil, err
+ }
+ return AddDummyV2S1Signature(unsigned)
+}
+
+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History),
+// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates,
+// both from m.History and m.FSLayers).
+// Note that even after this succeeds, m.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func (m *Schema1) fixManifestLayers() error {
+ type imageV1 struct {
+ ID string
+ Parent string
+ }
+ // Per the specification, we can assume that len(m.FSLayers) == len(m.History)
+ imgs := make([]*imageV1, len(m.FSLayers))
+ for i := range m.FSLayers {
+ img := &imageV1{}
+
+ if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
+ return err
+ }
+
+ imgs[i] = img
+ if err := validateV1ID(img.ID); err != nil {
+ return err
+ }
+ }
+ if imgs[len(imgs)-1].Parent != "" {
+ return errors.New("Invalid parent ID in the base layer of the image")
+ }
+ // check general duplicates to error instead of a deadlock
+ idmap := make(map[string]struct{})
+ var lastID string
+ for _, img := range imgs {
+ // skip IDs that appear after each other, we handle those later
+ if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+ return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ }
+ lastID = img.ID
+ idmap[lastID] = struct{}{}
+ }
+ // backwards loop so that we keep the remaining indexes after removing items
+ for i := len(imgs) - 2; i >= 0; i-- {
+ if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+ m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
+ m.History = append(m.History[:i], m.History[i+1:]...)
+ } else if imgs[i].Parent != imgs[i+1].ID {
+ return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
+ }
+ }
+ return nil
+}
+
+var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+
+func validateV1ID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return errors.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ s1 := &Schema1V1Compatibility{}
+ if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
+ return nil, err
+ }
+ return &types.ImageInspectInfo{
+ Tag: m.Tag,
+ Created: s1.Created,
+ DockerVersion: "",
+ Labels: make(map[string]string),
+ Architecture: "",
+ Os: "",
+ Layers: []string{},
+ }, nil
+}
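The (len(m.FSLayers)-1)-i arithmetic above is the heart of the Schema1 layer handling: schema 1 manifests list fsLayers newest-first, while LayerInfos and UpdateLayerInfos present layers root-first. A minimal, stdlib-only sketch of that reversal; the helper name and sample values are illustrative and not part of the vendored code:

package main

import "fmt"

// reverseToRootFirst mirrors Schema1.LayerInfos: copy index i of a
// newest-first list into index (len-1)-i of a root-first list.
func reverseToRootFirst(fsLayers []string) []string {
	out := make([]string, len(fsLayers))
	for i, l := range fsLayers {
		out[(len(fsLayers)-1)-i] = l
	}
	return out
}

func main() {
	fmt.Println(reverseToRootFirst([]string{"top", "middle", "root"}))
	// Prints: [root middle top]
}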
diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go
new file mode 100644
index 000000000..a44e561b8
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/docker_schema2.go
@@ -0,0 +1,241 @@
+package manifest
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/containers/image/pkg/strslice"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
+type Schema2Descriptor struct {
+ MediaType string `json:"mediaType"`
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+ URLs []string `json:"urls,omitempty"`
+}
+
+// Schema2 is a manifest in docker/distribution schema 2.
+type Schema2 struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ ConfigDescriptor Schema2Descriptor `json:"config"`
+ LayersDescriptors []Schema2Descriptor `json:"layers"`
+}
+
+// Schema2Port is a Port, a string containing port number and protocol in the
+// format "80/tcp", from docker/go-connections/nat.
+type Schema2Port string
+
+// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
+// docker/go-connections/nat.
+type Schema2PortSet map[Schema2Port]struct{}
+
+// Schema2HealthConfig is a HealthConfig, which holds configuration settings
+// for the HEALTHCHECK feature, from docker/docker/api/types/container.
+type Schema2HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container, also support user:group
+ AttachStdin bool // Attach the standard input, making user interaction possible
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that was defined in the image Dockerfile
+ Labels map[string]string // List of labels set on this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Schema2Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Schema2Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built on and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+ Schema2V1Image
+ Parent digest.Digest `json:"parent,omitempty"`
+ RootFS *Schema2RootFS `json:"rootfs,omitempty"`
+ History []Schema2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+
+ // rawJSON caches the immutable JSON associated with this image.
+ rawJSON []byte
+
+ // computedID is the ID computed from the hash of the image config.
+ // Not to be confused with the legacy V1 ID in V1Image.
+ computedID digest.Digest
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+ s2 := Schema2{}
+ if err := json.Unmarshal(manifest, &s2); err != nil {
+ return nil, err
+ }
+ return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+ return &Schema2{
+ SchemaVersion: 2,
+ MediaType: DockerV2Schema2MediaType,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
+ }
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+ clone := *src
+ return &clone
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, MediaType: DockerV2Schema2ConfigMediaType}
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []types.BlobInfo {
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, types.BlobInfo{
+ Digest: layer.Digest,
+ Size: layer.Size,
+ URLs: layer.URLs,
+ MediaType: layer.MediaType,
+ })
+ }
+ return blobs
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.LayersDescriptors) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+ }
+ original := m.LayersDescriptors
+ m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ m.LayersDescriptors[i].MediaType = original[i].MediaType
+ m.LayersDescriptors[i].Digest = info.Digest
+ m.LayersDescriptors[i].Size = info.Size
+ m.LayersDescriptors[i].URLs = info.URLs
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema2) Serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ config, err := configGetter(m.ConfigInfo())
+ if err != nil {
+ return nil, err
+ }
+ s2 := &Schema2Image{}
+ if err := json.Unmarshal(config, s2); err != nil {
+ return nil, err
+ }
+ return &types.ImageInspectInfo{
+ Tag: "",
+ Created: s2.Created,
+ DockerVersion: s2.DockerVersion,
+ Labels: s2.Config.Labels,
+ Architecture: s2.Architecture,
+ Os: s2.OS,
+ Layers: []string{},
+ }, nil
+}
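As a usage sketch (not part of this diff, assuming the vendored manifest and go-digest packages above are importable): Schema2FromComponents assembles a manifest from descriptors, and LayerInfos reads them back as BlobInfos. The sizes and blob contents here are made up:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
)

func main() {
	// Build a schema 2 manifest from hypothetical config and layer blobs.
	config := manifest.Schema2Descriptor{
		MediaType: manifest.DockerV2Schema2ConfigMediaType,
		Size:      7023,
		Digest:    digest.FromString("example config blob"),
	}
	layers := []manifest.Schema2Descriptor{{
		MediaType: manifest.DockerV2Schema2LayerMediaType,
		Size:      32654,
		Digest:    digest.FromString("example layer blob"),
	}}
	m := manifest.Schema2FromComponents(config, layers)
	for _, l := range m.LayerInfos() {
		fmt.Println(l.Digest, l.Size, l.MediaType)
	}
}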
diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go
new file mode 100644
index 000000000..01f57a325
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/manifest.go
@@ -0,0 +1,217 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/containers/image/types"
+ "github.com/docker/libtrust"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg?
+const (
+ // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+ DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+ DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+ DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+ // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+ DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+ DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+ imgspecv1.MediaTypeImageManifest,
+ DockerV2Schema2MediaType,
+ DockerV2Schema1SignedMediaType,
+ DockerV2Schema1MediaType,
+ // DockerV2ListMediaType, // FIXME: Restore this ASAP
+}
+
+// Manifest is an interface for parsing and modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
+// directly.
+//
+// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
+// This interface is similar to, but not strictly equivalent to, the corresponding methods in types.Image.
+type Manifest interface {
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ ConfigInfo() types.BlobInfo
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []types.BlobInfo
+ // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+ UpdateLayerInfos(layerInfos []types.BlobInfo) error
+
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest,
+ // incorporating information from a configuration blob returned by configGetter, if
+ // the underlying image format is expected to include a configuration blob.
+ Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
+
+ // Serialize returns the manifest in a blob format.
+ // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+ Serialize() ([]byte, error)
+}
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+func GuessMIMEType(manifest []byte) string {
+ // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+ // Also docker/distribution/manifest.Versioned.
+ meta := struct {
+ MediaType string `json:"mediaType"`
+ SchemaVersion int `json:"schemaVersion"`
+ Signatures interface{} `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(manifest, &meta); err != nil {
+ return ""
+ }
+
+ switch meta.MediaType {
+ case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type.
+ return meta.MediaType
+ }
+ // this is the only way the function can return DockerV2Schema1MediaType; recognizing that is essential for stripping the JWS signatures, i.e. for computing the correct manifest digest.
+ switch meta.SchemaVersion {
+ case 1:
+ if meta.Signatures != nil {
+ return DockerV2Schema1SignedMediaType
+ }
+ return DockerV2Schema1MediaType
+ case 2:
+ // best effort to understand if this is an OCI image, since mediaType
+ // is no longer present in the manifest for OCI
+ // for docker v2s2, meta.MediaType should have been set; given the data, this is our best guess
+ ociMan := struct {
+ Config struct {
+ MediaType string `json:"mediaType"`
+ } `json:"config"`
+ Layers []imgspecv1.Descriptor `json:"layers"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociMan); err != nil {
+ return ""
+ }
+ if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig && len(ociMan.Layers) != 0 {
+ return imgspecv1.MediaTypeImageManifest
+ }
+ ociIndex := struct {
+ Manifests []imgspecv1.Descriptor `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+ return ""
+ }
+ if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest {
+ return imgspecv1.MediaTypeImageIndex
+ }
+ return DockerV2Schema2MediaType
+ }
+ return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifest []byte) (digest.Digest, error) {
+ if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+ sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+ if err != nil {
+ return "", err
+ }
+ manifest, err = sig.Payload()
+ if err != nil {
+ // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+ // that libtrust itself has josebase64UrlEncode()d
+ return "", err
+ }
+ }
+
+ return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+ // This should eventually support various digest types.
+ actualDigest, err := Digest(manifest)
+ if err != nil {
+ return false, err
+ }
+ return expectedDigest == actualDigest, nil
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+ key, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, err // Coverage: This can fail only if rand.Reader fails.
+ }
+
+ js, err := libtrust.NewJSONSignature(manifest)
+ if err != nil {
+ return nil, err
+ }
+ if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+ return nil, err
+ }
+ return js.PrettySignature("signatures")
+}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+func NormalizedMIMEType(input string) string {
+ switch input {
+ // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+ // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+ // need to happen within the ImageSource.
+ case "application/json":
+ return DockerV2Schema1SignedMediaType
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
+ imgspecv1.MediaTypeImageManifest,
+ DockerV2Schema2MediaType,
+ DockerV2ListMediaType:
+ return input
+ default:
+ // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+ // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+ // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+ //
+ // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+ // This makes no real sense, but it happens because requests for manifests are redirected
+ // to a content distribution network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+ return DockerV2Schema1SignedMediaType
+ }
+}
+
+// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
+func FromBlob(manblob []byte, mt string) (Manifest, error) {
+ switch NormalizedMIMEType(mt) {
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
+ return Schema1FromManifest(manblob)
+ case imgspecv1.MediaTypeImageManifest:
+ return OCI1FromManifest(manblob)
+ case DockerV2Schema2MediaType:
+ return Schema2FromManifest(manblob)
+ case DockerV2ListMediaType:
+ return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
+ default: // Note that this may not be reachable: NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+ }
+}
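A small sketch of the detection rules GuessMIMEType implements, again assuming the vendored manifest package is importable; the JSON snippets are minimal made-up inputs:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	for _, blob := range []string{
		`{"schemaVersion": 1}`,                   // v2s1, unsigned
		`{"schemaVersion": 1, "signatures": []}`, // v2s1, signed
		`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`, // v2s2
	} {
		fmt.Println(manifest.GuessMIMEType([]byte(blob)))
	}
}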
diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go
new file mode 100644
index 000000000..18e27d23c
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/oci.go
@@ -0,0 +1,108 @@
+package manifest
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// OCI1 is a manifest.Manifest implementation for OCI images.
+// The underlying data from imgspecv1.Manifest is also available.
+type OCI1 struct {
+ imgspecv1.Manifest
+}
+
+// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
+func OCI1FromManifest(manifest []byte) (*OCI1, error) {
+ oci1 := OCI1{}
+ if err := json.Unmarshal(manifest, &oci1); err != nil {
+ return nil, err
+ }
+ return &oci1, nil
+}
+
+// OCI1FromComponents creates an OCI1 manifest instance from the supplied data.
+func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 {
+ return &OCI1{
+ imgspecv1.Manifest{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ Config: config,
+ Layers: layers,
+ },
+ }
+}
+
+// OCI1Clone creates a copy of the supplied OCI1 manifest.
+func OCI1Clone(src *OCI1) *OCI1 {
+ return &OCI1{
+ Manifest: src.Manifest,
+ }
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *OCI1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations, MediaType: imgspecv1.MediaTypeImageConfig}
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *OCI1) LayerInfos() []types.BlobInfo {
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.Layers {
+ blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
+ }
+ return blobs
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.Layers) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+ }
+ original := m.Layers
+ m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ m.Layers[i].MediaType = original[i].MediaType
+ m.Layers[i].Digest = info.Digest
+ m.Layers[i].Size = info.Size
+ m.Layers[i].Annotations = info.Annotations
+ m.Layers[i].URLs = info.URLs
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *OCI1) Serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ config, err := configGetter(m.ConfigInfo())
+ if err != nil {
+ return nil, err
+ }
+ v1 := &imgspecv1.Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
+ created := time.Time{}
+ if v1.Created != nil {
+ created = *v1.Created
+ }
+ return &types.ImageInspectInfo{
+ Tag: "",
+ Created: created,
+ DockerVersion: "",
+ Labels: v1.Config.Labels,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ Layers: []string{},
+ }, nil
+}
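A usage sketch for the OCI path, parallel to the schema 2 example earlier (the config and layer bytes are placeholders; the assembled manifest is content-addressed via go-digest):

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	configBlob := []byte(`{"architecture":"amd64","os":"linux"}`)
	layerBlob := []byte("pretend this is a gzipped layer tarball")

	m := manifest.OCI1FromComponents(
		imgspecv1.Descriptor{
			MediaType: imgspecv1.MediaTypeImageConfig,
			Digest:    digest.FromBytes(configBlob),
			Size:      int64(len(configBlob)),
		},
		[]imgspecv1.Descriptor{{
			MediaType: imgspecv1.MediaTypeImageLayerGzip,
			Digest:    digest.FromBytes(layerBlob),
			Size:      int64(len(layerBlob)),
		}},
	)
	blob, err := m.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}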
diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go
new file mode 100644
index 000000000..52e99a43d
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go
@@ -0,0 +1,131 @@
+package archive
+
+import (
+ "io"
+ "os"
+
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/pkg/errors"
+)
+
+type ociArchiveImageDestination struct {
+ ref ociArchiveReference
+ unpackedDest types.ImageDestination
+ tempDirRef tempDirOCIRef
+}
+
+// newImageDestination returns an ImageDestination for writing to an oci-archive file.
+func newImageDestination(ctx *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
+ tempDirRef, err := createOCIRef(ref.image)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating oci reference")
+ }
+ unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx)
+ if err != nil {
+ if err := tempDirRef.deleteTempDir(); err != nil {
+ return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+ }
+ return nil, err
+ }
+ return &ociArchiveImageDestination{ref: ref,
+ unpackedDest: unpackedDest,
+ tempDirRef: tempDirRef}, nil
+}
+
+// Reference returns the reference used to set up this destination.
+func (d *ociArchiveImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any
+// Close deletes the temp directory of the oci-archive image
+func (d *ociArchiveImageDestination) Close() error {
+ defer d.tempDirRef.deleteTempDir()
+ return d.unpackedDest.Close()
+}
+
+func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string {
+ return d.unpackedDest.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures
+func (d *ociArchiveImageDestination) SupportsSignatures() error {
+ return d.unpackedDest.SupportsSignatures()
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination
+func (d *ociArchiveImageDestination) ShouldCompressLayers() bool {
+ return d.unpackedDest.ShouldCompressLayers()
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool {
+ return d.unpackedDest.AcceptsForeignLayerURLs()
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise
+func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
+ return d.unpackedDest.MustMatchRuntimeOS()
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+func (d *ociArchiveImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlob(stream, inputInfo)
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
+func (d *ociArchiveImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ return d.unpackedDest.HasBlob(info)
+}
+
+func (d *ociArchiveImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return d.unpackedDest.ReapplyBlob(info)
+}
+
+// PutManifest writes manifest to the destination
+func (d *ociArchiveImageDestination) PutManifest(m []byte) error {
+ return d.unpackedDest.PutManifest(m)
+}
+
+func (d *ociArchiveImageDestination) PutSignatures(signatures [][]byte) error {
+ return d.unpackedDest.PutSignatures(signatures)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted:
+// the temp directory is tarred up into the destination file (the directory itself is deleted by Close)
+func (d *ociArchiveImageDestination) Commit() error {
+ if err := d.unpackedDest.Commit(); err != nil {
+ return errors.Wrapf(err, "error storing image %q", d.ref.image)
+ }
+
+ // path of directory to tar up
+ src := d.tempDirRef.tempDirectory
+ // path to save tarred up file
+ dst := d.ref.resolvedFile
+ return tarDirectory(src, dst)
+}
+
+// tarDirectory tars the directory at src and saves it to dst
+func tarDirectory(src, dst string) error {
+ // input is a stream of bytes from the archive of the directory at src
+ input, err := archive.Tar(src, archive.Uncompressed)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving stream of bytes from %q", src)
+ }
+
+ // creates the tar file
+ outFile, err := os.Create(dst)
+ if err != nil {
+ return errors.Wrapf(err, "error creating tar file %q", dst)
+ }
+ defer outFile.Close()
+
+ // copies the contents of the directory to the tar file
+ _, err = io.Copy(outFile, input)
+
+ return err
+}
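tarDirectory above is the whole oci-archive write path in miniature: stream an uncompressed tar of the temporary layout directory into the destination file. A standalone sketch of the same pattern; the paths are hypothetical and it assumes the vendored containers/storage archive package:

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

// tarDir mirrors tarDirectory above: stream an uncompressed tar of src into dst.
func tarDir(src, dst string) error {
	input, err := archive.Tar(src, archive.Uncompressed)
	if err != nil {
		return err
	}
	defer input.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, input)
	return err
}

func main() {
	if err := tarDir("/tmp/oci-layout-dir", "/tmp/image.oci-archive"); err != nil {
		panic(err)
	}
}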
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
new file mode 100644
index 000000000..fd437f5a9
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -0,0 +1,92 @@
+package archive
+
+import (
+ "context"
+ "io"
+
+ ocilayout "github.com/containers/image/oci/layout"
+ "github.com/containers/image/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+type ociArchiveImageSource struct {
+ ref ociArchiveReference
+ unpackedSrc types.ImageSource
+ tempDirRef tempDirOCIRef
+}
+
+// newImageSource returns an ImageSource for reading from an oci-archive file;
+// it untars the archive into a temporary directory and reads from there
+func newImageSource(ctx *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
+ tempDirRef, err := createUntarTempDir(ref)
+ if err != nil {
+ return nil, errors.Wrap(err, "error creating temp directory")
+ }
+
+ unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx)
+ if err != nil {
+ if err := tempDirRef.deleteTempDir(); err != nil {
+ return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+ }
+ return nil, err
+ }
+ return &ociArchiveImageSource{ref: ref,
+ unpackedSrc: unpackedSrc,
+ tempDirRef: tempDirRef}, nil
+}
+
+// LoadManifestDescriptor loads the manifest descriptor of an oci-archive image reference
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+ ociArchRef, ok := imgRef.(ociArchiveReference)
+ if !ok {
+ return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
+ }
+ tempDirRef, err := createUntarTempDir(ociArchRef)
+ if err != nil {
+ return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory")
+ }
+ defer tempDirRef.deleteTempDir()
+
+ descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
+ if err != nil {
+ return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index")
+ }
+ return descriptor, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociArchiveImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+// Close deletes the temporary directory the archive was untarred into
+func (s *ociArchiveImageSource) Close() error {
+ defer s.tempDirRef.deleteTempDir()
+ return s.unpackedSrc.Close()
+}
+
+// GetManifest returns the image's manifest along with its MIME type
+// (which may be empty when it can't be determined but the manifest is available).
+func (s *ociArchiveImageSource) GetManifest() ([]byte, string, error) {
+ return s.unpackedSrc.GetManifest()
+}
+
+func (s *ociArchiveImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ return s.unpackedSrc.GetTargetManifest(digest)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob's size.
+func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ return s.unpackedSrc.GetBlob(info)
+}
+
+func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) {
+ return s.unpackedSrc.GetSignatures(c)
+}
+
+func (s *ociArchiveImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ return nil
+}
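A usage sketch for the exported entry point above (the archive file name and tag are hypothetical; the file must exist on disk, since LoadManifestDescriptor untars it into a temp directory):

package main

import (
	"fmt"

	ociarchive "github.com/containers/image/oci/archive"
)

func main() {
	// "busybox.oci.tar:latest" is a hypothetical oci-archive file and tag.
	ref, err := ociarchive.ParseReference("busybox.oci.tar:latest")
	if err != nil {
		panic(err)
	}
	desc, err := ociarchive.LoadManifestDescriptor(ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.MediaType, desc.Digest, desc.Size)
}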
diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go
new file mode 100644
index 000000000..31b191989
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/archive/oci_transport.go
@@ -0,0 +1,225 @@
+package archive
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/containers/image/directory/explicitfilepath"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ ocilayout "github.com/containers/image/oci/layout"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI archives:
+// it creates an oci-archive tar file by calling into the OCI transport,
+// tarring the directory created by oci, and deleting the directory
+var Transport = ociArchiveTransport{}
+
+type ociArchiveTransport struct{}
+
+// ociArchiveReference is an ImageReference for OCI Archive paths
+type ociArchiveReference struct {
+ file string
+ resolvedFile string
+ image string
+}
+
+func (t ociArchiveTransport) Name() string {
+ return "oci-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix,
+// into an ImageReference.
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+ var file string
+ sep := strings.SplitN(scope, ":", 2)
+ file = sep[0]
+
+ if len(sep) == 2 {
+ image := sep[1]
+ if !refRegexp.MatchString(image) {
+ return errors.Errorf("Invalid image %s", image)
+ }
+ }
+
+ if !strings.HasPrefix(file, "/") {
+ return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(file)
+ if cleaned != file {
+ return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ }
+ return nil
+}
+
+// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+ separator = `(?:[-._:@+]|--)`
+ alphanum = `(?:[A-Za-z0-9]+)`
+ component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+ var file, image string
+ sep := strings.SplitN(reference, ":", 2)
+ file = sep[0]
+
+ if len(sep) == 2 {
+ image = sep[1]
+ }
+ return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and an image.
+func NewReference(file, image string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+ if err != nil {
+ return nil, err
+ }
+ // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+ // from being ambiguous with values of PolicyConfigurationIdentity.
+ if strings.Contains(resolved, ":") {
+ return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", file, image, resolved)
+ }
+ if len(image) > 0 && !refRegexp.MatchString(image) {
+ return nil, errors.Errorf("Invalid image %s", image)
+ }
+ return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
+}
+
+func (ref ociArchiveReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+func (ref ociArchiveReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s:%s", ref.file, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+func (ref ociArchiveReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
+ // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+ // same image and the two can’t be statically disambiguated. Using at least the repository directory is
+ // less granular but hopefully still useful.
+ return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set
+func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedFile
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+ if lastSlash == -1 || path == "/" {
+ break
+ }
+ res = append(res, path)
+ path = path[:lastSlash]
+ }
+ return res
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ src, err := newImageSource(ctx, ref)
+ if err != nil {
+ return nil, err
+ }
+ return image.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociArchiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociArchiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx *types.SystemContext) error {
+ return errors.Errorf("Deleting images not implemented for oci: images")
+}
+
+// struct to store the ociReference and temporary directory returned by createOCIRef
+type tempDirOCIRef struct {
+ tempDirectory string
+ ociRefExtracted types.ImageReference
+}
+
+// deletes the temporary directory created
+func (t *tempDirOCIRef) deleteTempDir() error {
+ return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image
+func createOCIRef(image string) (tempDirOCIRef, error) {
+ dir, err := ioutil.TempDir("/var/tmp", "oci")
+ if err != nil {
+ return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
+ }
+ ociRef, err := ocilayout.NewReference(dir, image)
+ if err != nil {
+ return tempDirOCIRef{}, err
+ }
+
+ tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+ return tempDirRef, nil
+}
+
+// creates the temporary directory and copies the tarred content to it
+func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
+ tempDirRef, err := createOCIRef(ref.image)
+ if err != nil {
+ return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
+ }
+ src := ref.resolvedFile
+ dst := tempDirRef.tempDirectory
+ if err := archive.UntarPath(src, dst); err != nil {
+ if err := tempDirRef.deleteTempDir(); err != nil {
+ return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+ }
+ return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory)
+ }
+ return tempDirRef, nil
+}
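The scope-validation rules above (absolute, canonical path, optionally a ":image" suffix matching refRegexp) can be exercised directly through the exported Transport value; the scope strings below are illustrative:

package main

import (
	"fmt"

	ociarchive "github.com/containers/image/oci/archive"
)

func main() {
	for _, scope := range []string{
		"/var/lib/images/busybox.tar:latest", // valid: absolute path plus a well-formed image name
		"relative/path:latest",               // rejected: not an absolute path
		"/",                                  // rejected: use the generic default scope ""
	} {
		err := ociarchive.Transport.ValidatePolicyConfigurationScope(scope)
		fmt.Printf("%q -> %v\n", scope, err)
	}
}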
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
new file mode 100644
index 000000000..ce1e0c3e2
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -0,0 +1,233 @@
+package layout
+
+import (
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspec "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type ociImageDestination struct {
+ ref ociReference
+ index imgspecv1.Index
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing directory.
+func newImageDestination(ref ociReference) (types.ImageDestination, error) {
+ if ref.image == "" {
+ return nil, errors.Errorf("cannot save image with empty image.ref.name")
+ }
+ index := imgspecv1.Index{
+ Versioned: imgspec.Versioned{
+ SchemaVersion: 2,
+ },
+ }
+ return &ociImageDestination{ref: ref, index: index}, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ociImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ociImageDestination) Close() error {
+ return nil
+}
+
+func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
+ return []string{
+ imgspecv1.MediaTypeImageManifest,
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *ociImageDestination) SupportsSignatures() error {
+ return errors.Errorf("Pushing signatures for OCI images is not supported")
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *ociImageDestination) ShouldCompressLayers() bool {
+ return true
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *ociImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ if err := ensureDirectoryExists(d.ref.dir); err != nil {
+ return types.BlobInfo{}, err
+ }
+ blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ succeeded := false
+ defer func() {
+ blobFile.Close()
+ if !succeeded {
+ os.Remove(blobFile.Name())
+ }
+ }()
+
+ digester := digest.Canonical.Digester()
+ tee := io.TeeReader(stream, digester.Hash())
+
+ size, err := io.Copy(blobFile, tee)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ computedDigest := digester.Digest()
+ if inputInfo.Size != -1 && size != inputInfo.Size {
+ return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ }
+ if err := blobFile.Sync(); err != nil {
+ return types.BlobInfo{}, err
+ }
+ if err := blobFile.Chmod(0644); err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ blobPath, err := d.ref.blobPath(computedDigest)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ if err := ensureParentDirectoryExists(blobPath); err != nil {
+ return types.BlobInfo{}, err
+ }
+ if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+ return types.BlobInfo{}, err
+ }
+ succeeded = true
+ return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ if info.Digest == "" {
+ return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ }
+ blobPath, err := d.ref.blobPath(info.Digest)
+ if err != nil {
+ return false, -1, err
+ }
+ finfo, err := os.Stat(blobPath)
+ if err != nil && os.IsNotExist(err) {
+ return false, -1, nil
+ }
+ if err != nil {
+ return false, -1, err
+ }
+ return true, finfo.Size(), nil
+}
+
+func (d *ociImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return info, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ociImageDestination) PutManifest(m []byte) error {
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ desc := imgspecv1.Descriptor{}
+ desc.Digest = digest
+ // TODO(runcom): be aware of and add support for OCI manifest lists
+ desc.MediaType = imgspecv1.MediaTypeImageManifest
+ desc.Size = int64(len(m))
+
+ blobPath, err := d.ref.blobPath(digest)
+ if err != nil {
+ return err
+ }
+ if err := ensureParentDirectoryExists(blobPath); err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+ return err
+ }
+
+ if d.ref.image == "" {
+ return errors.Errorf("cannot save image with empyt image.ref.name")
+ }
+
+ annotations := make(map[string]string)
+ annotations["org.opencontainers.image.ref.name"] = d.ref.image
+ desc.Annotations = annotations
+ desc.Platform = &imgspecv1.Platform{
+ Architecture: runtime.GOARCH,
+ OS: runtime.GOOS,
+ }
+ d.index.Manifests = append(d.index.Manifests, desc)
+
+ return nil
+}
+
+func ensureDirectoryExists(path string) error {
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+ return ensureDirectoryExists(filepath.Dir(path))
+}
+
+func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
+ if len(signatures) != 0 {
+ return errors.Errorf("Pushing signatures for OCI images is not supported")
+ }
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) Commit() error {
+ if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+ return err
+ }
+ indexJSON, err := json.Marshal(d.index)
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+}
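The core of PutBlob above is the digest-while-copying pattern: a TeeReader feeds the stream into the digester's hash as it is written out, so the blob can then be stored under its content address. A minimal sketch of just that pattern, using go-digest and a throwaway sink instead of a temp file:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Hash the stream while copying it, as PutBlob does.
	stream := strings.NewReader("pretend layer contents")
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())

	size, err := io.Copy(ioutil.Discard, tee) // PutBlob copies to a temp file instead
	if err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest(), size)
}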
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
new file mode 100644
index 000000000..67f0c3b82
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -0,0 +1,147 @@
+package layout
+
+import (
+ "context"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strconv"
+
+ "github.com/containers/image/pkg/tlsclientconfig"
+ "github.com/containers/image/types"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+type ociImageSource struct {
+ ref ociReference
+ descriptor imgspecv1.Descriptor
+ client *http.Client
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(ctx *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+ tr := tlsclientconfig.NewTransport()
+ tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+ if ctx != nil && ctx.OCICertPath != "" {
+ if err := tlsclientconfig.SetupCertificates(ctx.OCICertPath, tr.TLSClientConfig); err != nil {
+ return nil, err
+ }
+ tr.TLSClientConfig.InsecureSkipVerify = ctx.OCIInsecureSkipTLSVerify
+ }
+
+ client := &http.Client{}
+ client.Transport = tr
+ descriptor, err := ref.getManifestDescriptor()
+ if err != nil {
+ return nil, err
+ }
+ return &ociImageSource{ref: ref, descriptor: descriptor, client: client}, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ociImageSource) Close() error {
+ return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *ociImageSource) GetManifest() ([]byte, string, error) {
+ manifestPath, err := s.ref.blobPath(digest.Digest(s.descriptor.Digest))
+ if err != nil {
+ return nil, "", err
+ }
+ m, err := ioutil.ReadFile(manifestPath)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return m, s.descriptor.MediaType, nil
+}
+
+func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ manifestPath, err := s.ref.blobPath(digest)
+ if err != nil {
+ return nil, "", err
+ }
+
+ m, err := ioutil.ReadFile(manifestPath)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // XXX: GetTargetManifest means that we don't have the context of what
+ // mediaType the manifest has. In OCI this means that we don't know
+ // what reference it came from, so we just *assume* that it's
+ // MediaTypeImageManifest.
+ return m, imgspecv1.MediaTypeImageManifest, nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob's size.
+func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ return s.getExternalBlob(info.URLs)
+ }
+
+ path, err := s.ref.blobPath(info.Digest)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, 0, err
+ }
+ fi, err := r.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ return r, fi.Size(), nil
+}
+
+func (s *ociImageSource) GetSignatures(context.Context) ([][]byte, error) {
+ return [][]byte{}, nil
+}
+
+func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) {
+ errWrap := errors.New("failed fetching external blob from all urls")
+ for _, url := range urls {
+ resp, err := s.client.Get(url)
+ if err != nil {
+ errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+ continue
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ resp.Body.Close()
+ errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url)
+ continue
+ }
+
+ return resp.Body, getBlobSize(resp), nil
+ }
+
+ return nil, 0, errWrap
+}
+
+// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *ociImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ return nil
+}
+
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
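getBlobSize above encodes the "size unknown" convention used throughout these interfaces: a missing or malformed Content-Length becomes -1 rather than an error. A stdlib-only sketch of the same behavior (the helper name is illustrative):

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// blobSize mirrors getBlobSize above: report -1 when Content-Length is
// missing or malformed, matching the BlobInfo "size unknown" convention.
func blobSize(resp *http.Response) int64 {
	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
	if err != nil {
		size = -1
	}
	return size
}

func main() {
	fmt.Println(blobSize(&http.Response{Header: http.Header{"Content-Length": []string{"2048"}}})) // 2048
	fmt.Println(blobSize(&http.Response{Header: http.Header{}}))                                   // -1
}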
diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go
new file mode 100644
index 000000000..312bc0e4e
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/layout/oci_transport.go
@@ -0,0 +1,277 @@
+package layout
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/containers/image/directory/explicitfilepath"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI directories.
+var Transport = ociTransport{}
+
+type ociTransport struct{}
+
+func (t ociTransport) Name() string {
+ return "oci"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// annotation spec from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+ separator = `(?:[-._:@+]|--)`
+ alphanum = `(?:[A-Za-z0-9]+)`
+ component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
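+
+// Illustrative matches (editor's note): refRegexp accepts e.g. "notes",
+// "my-app_v1.0" and "dir/sub--image", but rejects "" and names that begin or
+// end with a separator, such as "-bad" or "bad.".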
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
+ sep := strings.SplitN(scope, ":", 2)
+ dir := sep[0]
+
+ if len(sep) == 2 {
+ image := sep[1]
+ if !refRegexp.MatchString(image) {
+ return errors.Errorf("Invalid image %s", image)
+ }
+ }
+
+ if !strings.HasPrefix(dir, "/") {
+ return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(dir)
+ if cleaned != dir {
+ return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ }
+ return nil
+}
+
+// ociReference is an ImageReference for OCI directory paths.
+type ociReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ dir string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+ image string // If image=="", it means the only image in the index.json is used
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+ var dir, image string
+ sep := strings.SplitN(reference, ":", 2)
+ dir = sep[0]
+
+ if len(sep) == 2 {
+ image = sep[1]
+ }
+ return NewReference(dir, image)
+}
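+
+// For example (editor's note, hypothetical path): both forms below are valid;
+// the second leaves image == "" and resolves to the sole index.json entry:
+//
+//	ref, err := ParseReference("/tmp/layout:busybox")
+//	ref, err = ParseReference("/tmp/layout")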
+
+// NewReference returns an OCI reference for a directory and an image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
+ if err != nil {
+ return nil, err
+ }
+ // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+ // from being ambiguous with values of PolicyConfigurationIdentity.
+ if strings.Contains(resolved, ":") {
+ return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, image, resolved)
+ }
+ if len(image) > 0 && !refRegexp.MatchString(image) {
+ return nil, errors.Errorf("Invalid image %s", image)
+ }
+ return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+}
+
+func (ref ociReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ociReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+ // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+ // same image and the two can’t be statically disambiguated. Using at least the repository directory is
+ // less granular but hopefully still useful.
+ return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedDir
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+ if lastSlash == -1 || path == "/" {
+ break
+ }
+ res = append(res, path)
+ path = path[:lastSlash]
+ }
+ return res
+}
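+
+// Worked example (editor's note): for resolvedDir == "/var/lib/images/app",
+// this returns {"/var/lib/images/app", "/var/lib/images", "/var/lib", "/var"};
+// "/" and the global default "" are implicit and therefore omitted.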
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ src, err := newImageSource(ctx, ref)
+ if err != nil {
+ return nil, err
+ }
+ return image.FromSource(src)
+}
+
+func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
+ indexJSON, err := os.Open(ref.indexPath())
+ if err != nil {
+ return imgspecv1.Descriptor{}, err
+ }
+ defer indexJSON.Close()
+ index := imgspecv1.Index{}
+ if err := json.NewDecoder(indexJSON).Decode(&index); err != nil {
+ return imgspecv1.Descriptor{}, err
+ }
+
+ var d *imgspecv1.Descriptor
+ if ref.image == "" {
+ // return manifest if only one image is in the oci directory
+ if len(index.Manifests) == 1 {
+ d = &index.Manifests[0]
+ } else {
+ // ask user to choose image when more than one image in the oci directory
+ return imgspecv1.Descriptor{}, errors.New("more than one image in oci, choose an image")
+ }
+ } else {
+ // if image specified, look through all manifests for a match
+ for _, md := range index.Manifests {
+ if md.MediaType != imgspecv1.MediaTypeImageManifest {
+ continue
+ }
+ refName, ok := md.Annotations["org.opencontainers.image.ref.name"]
+ if !ok {
+ continue
+ }
+ if refName == ref.image {
+ d = &md
+ break
+ }
+ }
+ }
+ if d == nil {
+ return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
+ }
+ return *d, nil
+}
+
+// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
+// when pulling an image
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+ ociRef, ok := imgRef.(ociReference)
+ if !ok {
+ return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociReference")
+ }
+ return ociRef.getManifestDescriptor()
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociReference) DeleteImage(ctx *types.SystemContext) error {
+ return errors.Errorf("Deleting images not implemented for oci: images")
+}
+
+// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
+func (ref ociReference) ociLayoutPath() string {
+ return filepath.Join(ref.dir, "oci-layout")
+}
+
+// indexPath returns a path for the index.json within a directory using OCI conventions.
+func (ref ociReference) indexPath() string {
+ return filepath.Join(ref.dir, "index.json")
+}
+
+// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
+func (ref ociReference) blobPath(digest digest.Digest) (string, error) {
+ if err := digest.Validate(); err != nil {
+ return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
+ }
+ return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil
+}
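+
+// For example (editor's note, hypothetical digest): blobPath("sha256:0123...")
+// resolves to <dir>/blobs/sha256/0123... per the OCI image-layout conventions.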
diff --git a/vendor/github.com/containers/image/openshift/openshift-copies.go b/vendor/github.com/containers/image/openshift/openshift-copies.go
new file mode 100644
index 000000000..01fe71a24
--- /dev/null
+++ b/vendor/github.com/containers/image/openshift/openshift-copies.go
@@ -0,0 +1,1174 @@
+package openshift
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/ghodss/yaml"
+ "github.com/imdario/mergo"
+ "github.com/pkg/errors"
+ "golang.org/x/net/http2"
+ "k8s.io/client-go/util/homedir"
+)
+
+// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
+// restTLSClientConfig contains settings to enable transport layer security
+type restTLSClientConfig struct {
+ // Server requires TLS client certificate authentication
+ CertFile string
+ // Server requires TLS client certificate authentication
+ KeyFile string
+ // Trusted root certificates for server
+ CAFile string
+
+ // CertData holds PEM-encoded bytes (typically read from a client certificate file).
+ // CertData takes precedence over CertFile
+ CertData []byte
+ // KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+ // KeyData takes precedence over KeyFile
+ KeyData []byte
+ // CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+ // CAData takes precedence over CAFile
+ CAData []byte
+}
+
+// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config.
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type restConfig struct {
+ // Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must
+ // be appended to all request URIs used to access the apiserver. This allows a frontend
+ // proxy to easily relocate all of the apiserver endpoints.
+ Host string
+
+ // Server requires Basic authentication
+ Username string
+ Password string
+
+ // Server requires Bearer authentication. This client will not attempt to use
+ // refresh tokens for an OAuth2 flow.
+ // TODO: demonstrate an OAuth2 compatible client.
+ BearerToken string
+
+ // TLSClientConfig contains settings to enable transport layer security
+ restTLSClientConfig
+
+ // Server should be accessed without verifying the TLS
+ // certificate. For testing only.
+ Insecure bool
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig.
+// ClientConfig is used to make it easy to get an API server client.
+type clientConfig interface {
+ // ClientConfig returns a complete client config
+ ClientConfig() (*restConfig, error)
+}
+
+// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig.
+func defaultClientConfig() clientConfig {
+ loadingRules := newOpenShiftClientConfigLoadingRules()
+ // REMOVED: Allowing command-line overriding of loadingRules
+ // REMOVED: clientcmd.ConfigOverrides
+
+ clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules)
+
+ return clientConfig
+}
+
+var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config")
+
+// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules.
+// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift.
+// 1. --config value
+// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file
+func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules {
+ chain := []string{}
+
+ envVarFile := os.Getenv("KUBECONFIG")
+ if len(envVarFile) != 0 {
+ chain = append(chain, filepath.SplitList(envVarFile)...)
+ } else {
+ chain = append(chain, recommendedHomeFile)
+ }
+
+ return &clientConfigLoadingRules{
+ Precedence: chain,
+ // REMOVED: Migration support; run (oc login) to trigger migration
+ }
+}
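+
+// For example (editor's note, Unix path lists assumed): with
+// KUBECONFIG="/etc/kube/a:/home/u/b" the precedence chain becomes
+// ["/etc/kube/a", "/home/u/b"]; with KUBECONFIG unset it is ["~/.kube/config"].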
+
+// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.
+// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid
+// passing extraneous information down a call stack
+type deferredLoadingClientConfig struct {
+ loadingRules *clientConfigLoadingRules
+
+ clientConfig clientConfig
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig.
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loading rules
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+ return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+ if config.clientConfig == nil {
+ // REMOVED: Support for concurrent use in multiple threads.
+ mergedConfig, err := config.loadingRules.Load()
+ if err != nil {
+ return nil, err
+ }
+
+ var mergedClientConfig clientConfig
+ // REMOVED: Interactive fallback support.
+ mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig)
+
+ config.clientConfig = mergedClientConfig
+ }
+
+ return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+ mergedClientConfig, err := config.createClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ mergedConfig, err := mergedClientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ // REMOVED: In-cluster service account configuration use.
+
+ return mergedConfig, nil
+}
+
+var (
+ // DefaultCluster is the cluster config used when no other config is specified
+ // TODO: eventually apiserver should start on 443 and be secure by default
+ defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
+
+ // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+ envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
+)
+
+// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type directClientConfig struct {
+ config clientcmdConfig
+}
+
+// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
+ return &directClientConfig{config}
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *directClientConfig) ClientConfig() (*restConfig, error) {
+ if err := config.ConfirmUsable(); err != nil {
+ return nil, err
+ }
+
+ configAuthInfo := config.getAuthInfo()
+ configClusterInfo := config.getCluster()
+
+ clientConfig := &restConfig{}
+ clientConfig.Host = configClusterInfo.Server
+ if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+ u.RawQuery = ""
+ u.Fragment = ""
+ clientConfig.Host = u.String()
+ }
+
+ // only try to read the auth information if we are secure
+ if isConfigTransportTLS(*clientConfig) {
+ var err error
+
+ // mergo is first-write-wins for map values and last-write-wins for interface values.
+ // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a.
+ // Our mergo.Merge version is older than this change.
+ // REMOVED: Support for interactive fallback.
+ userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
+ if err != nil {
+ return nil, err
+ }
+ mergo.Merge(clientConfig, userAuthPartialConfig)
+
+ serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+ if err != nil {
+ return nil, err
+ }
+ mergo.Merge(clientConfig, serverAuthPartialConfig)
+ }
+
+ return clientConfig, nil
+}
+
+// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
+// The clientauth.Info object contains both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+ mergedConfig := &restConfig{}
+
+ // configClusterInfo holds the information identifying the server, provided by .kubeconfig
+ configClientConfig := &restConfig{}
+ configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+ configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+ configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+ mergo.Merge(mergedConfig, configClientConfig)
+
+ return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// The clientauth.Info object contains both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+ mergedConfig := &restConfig{}
+
+ // blindly overwrite existing values based on precedence
+ if len(configAuthInfo.Token) > 0 {
+ mergedConfig.BearerToken = configAuthInfo.Token
+ }
+ if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+ mergedConfig.CertFile = configAuthInfo.ClientCertificate
+ mergedConfig.CertData = configAuthInfo.ClientCertificateData
+ mergedConfig.KeyFile = configAuthInfo.ClientKey
+ mergedConfig.KeyData = configAuthInfo.ClientKeyData
+ }
+ if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+ mergedConfig.Username = configAuthInfo.Username
+ mergedConfig.Password = configAuthInfo.Password
+ }
+
+ // REMOVED: prompting for missing information.
+ return mergedConfig, nil
+}
+
+// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
+func canIdentifyUser(config restConfig) bool {
+ return len(config.Username) > 0 ||
+ (len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+ len(config.BearerToken) > 0
+}
+
+// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *directClientConfig) ConfirmUsable() error {
+ var validationErrors []error
+ validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+ validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
+ // when direct client config is specified, and our only error is that no server is defined, we should
+ // return a standard "no config" error
+ if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
+ return newErrConfigurationInvalid([]error{errEmptyConfig})
+ }
+ return newErrConfigurationInvalid(validationErrors)
+}
+
+// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
+func (config *directClientConfig) getContextName() string {
+ // REMOVED: overrides support
+ return config.config.CurrentContext
+}
+
+// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
+func (config *directClientConfig) getAuthInfoName() string {
+ // REMOVED: overrides support
+ return config.getContext().AuthInfo
+}
+
+// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
+func (config *directClientConfig) getClusterName() string {
+ // REMOVED: overrides support
+ return config.getContext().Cluster
+}
+
+// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
+func (config *directClientConfig) getContext() clientcmdContext {
+ contexts := config.config.Contexts
+ contextName := config.getContextName()
+
+ var mergedContext clientcmdContext
+ if configContext, exists := contexts[contextName]; exists {
+ mergo.Merge(&mergedContext, configContext)
+ }
+ // REMOVED: overrides support
+
+ return mergedContext
+}
+
+var (
+ errEmptyConfig = errors.New("no configuration has been provided")
+ // message is for consistency with old behavior
+ errEmptyCluster = errors.New("cluster has no server defined")
+)
+
+// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
+// validateClusterInfo looks for conflicts and errors in the cluster info
+func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error {
+ var validationErrors []error
+
+ if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) {
+ return []error{errEmptyCluster}
+ }
+
+ if len(clusterInfo.Server) == 0 {
+ if len(clusterName) == 0 {
+ validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
+ } else {
+ validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
+ }
+ }
+ // Make sure CA data and CA file aren't both specified
+ if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
+ validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
+ }
+ if len(clusterInfo.CertificateAuthority) != 0 {
+ clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
+ if err != nil {
+ validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+ } else {
+ clientCertCA.Close()
+ }
+ }
+
+ return validationErrors
+}
+
+// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo.
+// validateAuthInfo looks for conflicts and errors in the auth info
+func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
+ var validationErrors []error
+
+ usingAuthPath := false
+ methods := make([]string, 0, 3)
+ if len(authInfo.Token) != 0 {
+ methods = append(methods, "token")
+ }
+ if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
+ methods = append(methods, "basicAuth")
+ }
+
+ if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
+ // Make sure cert data and file aren't both specified
+ if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
+ validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
+ }
+ // Make sure key data and file aren't both specified
+ if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
+ validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+ }
+ // Make sure a key is specified
+ if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
+ validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+ }
+
+ if len(authInfo.ClientCertificate) != 0 {
+ clientCertFile, err := os.Open(authInfo.ClientCertificate)
+ if err != nil {
+ validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+ } else {
+ clientCertFile.Close()
+ }
+ }
+ if len(authInfo.ClientKey) != 0 {
+ clientKeyFile, err := os.Open(authInfo.ClientKey)
+ if err != nil {
+ validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+ } else {
+ clientKeyFile.Close()
+ }
+ }
+ }
+
+ // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
+ if len(methods) > 1 && !usingAuthPath {
+ validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+ }
+
+ return validationErrors
+}
+
+// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo.
+func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
+ authInfos := config.config.AuthInfos
+ authInfoName := config.getAuthInfoName()
+
+ var mergedAuthInfo clientcmdAuthInfo
+ if configAuthInfo, exists := authInfos[authInfoName]; exists {
+ mergo.Merge(&mergedAuthInfo, configAuthInfo)
+ }
+ // REMOVED: overrides support
+
+ return mergedAuthInfo
+}
+
+// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
+func (config *directClientConfig) getCluster() clientcmdCluster {
+ clusterInfos := config.config.Clusters
+ clusterInfoName := config.getClusterName()
+
+ var mergedClusterInfo clientcmdCluster
+ mergo.Merge(&mergedClusterInfo, defaultCluster)
+ mergo.Merge(&mergedClusterInfo, envVarCluster)
+ if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+ mergo.Merge(&mergedClusterInfo, configClusterInfo)
+ }
+ // REMOVED: overrides support
+
+ return mergedClusterInfo
+}
+
+// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
+// This helper implements the error and Errors interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregateErr []error
+
+// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It checks whether any element of the input error list is nil, to avoid
+// a nil pointer panic when Error() is called.
+func newAggregate(errlist []error) error {
+ if len(errlist) == 0 {
+ return nil
+ }
+ // In case the input error list contains nil entries
+ var errs []error
+ for _, e := range errlist {
+ if e != nil {
+ errs = append(errs, e)
+ }
+ }
+ if len(errs) == 0 {
+ return nil
+ }
+ return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+ if len(agg) == 0 {
+ // This should never happen, really.
+ return ""
+ }
+ if len(agg) == 1 {
+ return agg[0].Error()
+ }
+ result := fmt.Sprintf("[%s", agg[0].Error())
+ for i := 1; i < len(agg); i++ {
+ result += fmt.Sprintf(", %s", agg[i].Error())
+ }
+ result += "]"
+ return result
+}
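+
+// For example (editor's note): newAggregate([]error{errA, errB}).Error()
+// renders as "[<errA>, <errB>]", while a single-element aggregate renders as
+// that error's own message.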
+
+// REMOVED: aggregateErr.Errors
+
+// errConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+var _ error = errConfigurationInvalid{}
+
+// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
+
+// newErrConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
+func newErrConfigurationInvalid(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ default:
+ return errConfigurationInvalid(errs)
+ }
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+ return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
+// ExplicitPath is special: if a user specifically requests a certain file be used, an error is reported if this file is not present
+type clientConfigLoadingRules struct {
+ Precedence []string
+}
+
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules.
+// If the ExplicitPath is set, return the unmerged explicit file.
+// Otherwise, return a merged config based on the Precedence slice
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
+ errlist := []error{}
+
+ kubeConfigFiles := []string{}
+
+ // REMOVED: explicit path support
+ kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+
+ kubeconfigs := []*clientcmdConfig{}
+ // read and cache the config files so that we only look at them once
+ for _, filename := range kubeConfigFiles {
+ if len(filename) == 0 {
+ // no work to do
+ continue
+ }
+
+ config, err := loadFromFile(filename)
+ if os.IsNotExist(err) {
+ // skip missing files
+ continue
+ }
+ if err != nil {
+ errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
+ continue
+ }
+
+ kubeconfigs = append(kubeconfigs, config)
+ }
+
+ // first merge all of our maps
+ mapConfig := clientcmdNewConfig()
+ for _, kubeconfig := range kubeconfigs {
+ mergo.Merge(mapConfig, kubeconfig)
+ }
+
+ // merge all of the struct values in the reverse order so that priority is given correctly
+ // errors are not added to the list the second time
+ nonMapConfig := clientcmdNewConfig()
+ for i := len(kubeconfigs) - 1; i >= 0; i-- {
+ kubeconfig := kubeconfigs[i]
+ mergo.Merge(nonMapConfig, kubeconfig)
+ }
+
+ // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+ // get the values we expect.
+ config := clientcmdNewConfig()
+ mergo.Merge(config, mapConfig)
+ mergo.Merge(config, nonMapConfig)
+
+ // REMOVED: Possibility to skip this.
+ if err := resolveLocalPaths(config); err != nil {
+ errlist = append(errlist, err)
+ }
+
+ return config, newAggregate(errlist)
+}
+
+// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func loadFromFile(filename string) (*clientcmdConfig, error) {
+ kubeconfigBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ config, err := load(kubeconfigBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // set LocationOfOrigin on every Cluster, User, and Context
+ for key, obj := range config.AuthInfos {
+ obj.LocationOfOrigin = filename
+ config.AuthInfos[key] = obj
+ }
+ for key, obj := range config.Clusters {
+ obj.LocationOfOrigin = filename
+ config.Clusters[key] = obj
+ }
+ for key, obj := range config.Contexts {
+ obj.LocationOfOrigin = filename
+ config.Contexts[key] = obj
+ }
+
+ if config.AuthInfos == nil {
+ config.AuthInfos = map[string]*clientcmdAuthInfo{}
+ }
+ if config.Clusters == nil {
+ config.Clusters = map[string]*clientcmdCluster{}
+ }
+ if config.Contexts == nil {
+ config.Contexts = map[string]*clientcmdContext{}
+ }
+
+ return config, nil
+}
+
+// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func load(data []byte) (*clientcmdConfig, error) {
+ config := clientcmdNewConfig()
+ // if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+ if len(data) == 0 {
+ return config, nil
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ data, err := yaml.YAMLToJSON(data)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(data, config); err != nil {
+ return nil, err
+ }
+ return config, nil
+}
+
+// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin
+// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
+// modification of its contents.
+func resolveLocalPaths(config *clientcmdConfig) error {
+ for _, cluster := range config.Clusters {
+ if len(cluster.LocationOfOrigin) == 0 {
+ continue
+ }
+ base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+ if err != nil {
+ return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
+ }
+
+ if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
+ return err
+ }
+ }
+ for _, authInfo := range config.AuthInfos {
+ if len(authInfo.LocationOfOrigin) == 0 {
+ continue
+ }
+ base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+ if err != nil {
+ return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
+ }
+
+ if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
+func getClusterFileReferences(cluster *clientcmdCluster) []*string {
+ return []*string{&cluster.CertificateAuthority}
+}
+
+// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
+func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
+ return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
+}
+
+// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func resolvePaths(refs []*string, base string) error {
+ for _, ref := range refs {
+ // Don't resolve empty paths
+ if len(*ref) > 0 {
+ // Don't resolve absolute paths
+ if !filepath.IsAbs(*ref) {
+ *ref = filepath.Join(base, *ref)
+ }
+ }
+ }
+ return nil
+}
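+
+// For example (editor's note): with base "/home/u/.kube", a ref "ca.crt"
+// becomes "/home/u/.kube/ca.crt", while "/etc/pki/ca.crt" and "" are left
+// unchanged.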
+
+// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
+ // REMOVED: Configurable GroupVersion, Codec
+ // REMOVED: Configurable versionedAPIPath
+ baseURL, err := defaultServerURLFor(config)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ transport, err := transportFor(config)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var httpClient *http.Client
+ if transport != http.DefaultTransport {
+ httpClient = &http.Client{Transport: transport}
+ }
+
+ // REMOVED: Configurable QPS, Burst, ContentConfig
+ // REMOVED: Actually returning a RESTClient object.
+ return baseURL, httpClient, nil
+}
+
+// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
+ if host == "" {
+ return nil, errors.Errorf("host must be a URL or a host:port pair")
+ }
+ base := host
+ hostURL, err := url.Parse(base)
+ if err != nil {
+ return nil, err
+ }
+ if hostURL.Scheme == "" {
+ scheme := "http://"
+ if defaultTLS {
+ scheme = "https://"
+ }
+ hostURL, err = url.Parse(scheme + base)
+ if err != nil {
+ return nil, err
+ }
+ if hostURL.Path != "" && hostURL.Path != "/" {
+ return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
+ }
+ }
+
+ // REMOVED: versionedAPIPath computation.
+ return hostURL, nil
+}
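+
+// For example (editor's note): defaultServerURL("example.com", true) yields
+// "https://example.com", and an explicit "http://example.com:8080" is kept
+// as-is regardless of defaultTLS.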
+
+// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
+// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func defaultServerURLFor(config *restConfig) (*url.URL, error) {
+ // TODO: move the default to secure when the apiserver supports TLS by default
+ // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+ hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
+ hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
+ defaultTLS := hasCA || hasCert || config.Insecure
+ host := config.Host
+ if host == "" {
+ host = "localhost"
+ }
+
+ // REMOVED: Configurable APIPath, GroupVersion
+ return defaultServerURL(host, defaultTLS)
+}
+
+// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return the
+// default http.DefaultTransport if no special case behavior is needed.
+func transportFor(config *restConfig) (http.RoundTripper, error) {
+ // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
+ return transportNew(config)
+}
+
+// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor(). Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.
+func isConfigTransportTLS(config restConfig) bool {
+ baseURL, err := defaultServerURLFor(&config)
+ if err != nil {
+ return false
+ }
+ return baseURL.Scheme == "https"
+}
+
+// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New.
+// New returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config.
+func transportNew(config *restConfig) (http.RoundTripper, error) {
+ // REMOVED: custom config.Transport support.
+ // Set transport level security
+
+ var (
+ rt http.RoundTripper
+ err error
+ )
+
+ rt, err = tlsCacheGet(config)
+ if err != nil {
+ return nil, err
+ }
+
+ // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
+ if len(config.Username) != 0 && len(config.BearerToken) != 0 {
+ return nil, errors.Errorf("username/password or bearer token may be set, but not both")
+ }
+
+ return rt, nil
+}
+
+// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR.
+// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
+// no matching CIDRs are found
+func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
+ // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it
+ noProxyEnv := os.Getenv("NO_PROXY")
+ noProxyRules := strings.Split(noProxyEnv, ",")
+
+ cidrs := []*net.IPNet{}
+ for _, noProxyRule := range noProxyRules {
+ _, cidr, _ := net.ParseCIDR(noProxyRule)
+ if cidr != nil {
+ cidrs = append(cidrs, cidr)
+ }
+ }
+
+ if len(cidrs) == 0 {
+ return delegate
+ }
+
+ return func(req *http.Request) (*url.URL, error) {
+ host := req.URL.Host
+ // for some urls, the Host is already the host, not the host:port
+ if net.ParseIP(host) == nil {
+ var err error
+ host, _, err = net.SplitHostPort(req.URL.Host)
+ if err != nil {
+ return delegate(req)
+ }
+ }
+
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return delegate(req)
+ }
+
+ for _, cidr := range cidrs {
+ if cidr.Contains(ip) {
+ return nil, nil
+ }
+ }
+
+ return delegate(req)
+ }
+}
+
+// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get.
+func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
+ // REMOVED: any actual caching
+
+ // Get the TLS options for this client config
+ tlsConfig, err := tlsConfigFor(config)
+ if err != nil {
+ return nil, err
+ }
+ // The options didn't require a custom TLS config
+ if tlsConfig == nil {
+ return http.DefaultTransport, nil
+ }
+
+ // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here.
+ t := &http.Transport{
+ // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
+ // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
+ Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: tlsConfig,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ }
+ // Allow clients to disable http2 if needed.
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
+ _ = http2.ConfigureTransport(t)
+ }
+ return t, nil
+}
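+
+// Editor's note: setting the DISABLE_HTTP2 environment variable to any
+// non-empty value skips http2.ConfigureTransport above, leaving the transport
+// HTTP/1.1-only; by default HTTP/2 is negotiated via ALPN.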
+
+// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor.
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func tlsConfigFor(c *restConfig) (*tls.Config, error) {
+ if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
+ return nil, nil
+ }
+ if c.HasCA() && c.Insecure {
+ return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+ }
+ if err := loadTLSFiles(c); err != nil {
+ return nil, err
+ }
+
+ tlsConfig := &tls.Config{
+ // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
+ MinVersion: tls.VersionTLS10,
+ InsecureSkipVerify: c.Insecure,
+ }
+
+ if c.HasCA() {
+ tlsConfig.RootCAs = rootCertPool(c.CAData)
+ }
+
+ if c.HasCertAuth() {
+ cert, err := tls.X509KeyPair(c.CertData, c.KeyData)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+
+ return tlsConfig, nil
+}
+
+// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles.
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *restConfig) error {
+ var err error
+ c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+ if err != nil {
+ return err
+ }
+
+ c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+ if err != nil {
+ return err
+ }
+
+ c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile.
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+ if len(data) > 0 {
+ return data, nil
+ }
+ if len(file) > 0 {
+ fileData, err := ioutil.ReadFile(file)
+ if err != nil {
+ return []byte{}, err
+ }
+ return fileData, nil
+ }
+ return nil, nil
+}
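+
+// For example (editor's note): dataFromSliceOrFile([]byte("pem"), "/any/path")
+// returns the in-memory bytes without touching the file, whereas
+// dataFromSliceOrFile(nil, "/etc/ca.crt") reads and returns the file contents.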
+
+// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool.
+// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) *x509.CertPool {
+ // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
+ // code for a look at the platform-specific insanity), so we'll use the fact that RootCAs == nil gives us the system values.
+ // It doesn't allow trusting either/or, but hopefully that won't be an issue
+ if len(caData) == 0 {
+ return nil
+ }
+
+ // if we have caData, use it
+ certPool := x509.NewCertPool()
+ certPool.AppendCertsFromPEM(caData)
+ return certPool
+}
+
+// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA.
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *restConfig) HasCA() bool {
+ return len(c.CAData) > 0 || len(c.CAFile) > 0
+}
+
+// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *restConfig) HasCertAuth() bool {
+ return len(c.CertData) != 0 || len(c.CertFile) != 0
+}
+
+// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type clientcmdConfig struct {
+ // Clusters is a map of referencable names to cluster configs
+ Clusters clustersMap `json:"clusters"`
+ // AuthInfos is a map of referencable names to user configs
+ AuthInfos authInfosMap `json:"users"`
+ // Contexts is a map of referencable names to context configs
+ Contexts contextsMap `json:"contexts"`
+ // CurrentContext is the name of the context that you would like to use by default
+ CurrentContext string `json:"current-context"`
+}
+
+type clustersMap map[string]*clientcmdCluster
+
+func (m *clustersMap) UnmarshalJSON(data []byte) error {
+ var a []v1NamedCluster
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ for _, e := range a {
+ cluster := e.Cluster // Allocates a new instance in each iteration
+ (*m)[e.Name] = &cluster
+ }
+ return nil
+}
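+
+// For example (editor's note, hypothetical document): the kubeconfig fragment
+//
+//	"clusters": [{"name": "prod", "cluster": {"server": "https://prod:8443"}}]
+//
+// decodes into a map whose "prod" key points at a clientcmdCluster with that
+// Server value.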
+
+type authInfosMap map[string]*clientcmdAuthInfo
+
+func (m *authInfosMap) UnmarshalJSON(data []byte) error {
+ var a []v1NamedAuthInfo
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ for _, e := range a {
+ authInfo := e.AuthInfo // Allocates a new instance in each iteration
+ (*m)[e.Name] = &authInfo
+ }
+ return nil
+}
+
+type contextsMap map[string]*clientcmdContext
+
+func (m *contextsMap) UnmarshalJSON(data []byte) error {
+ var a []v1NamedContext
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ for _, e := range a {
+ context := e.Context // Allocates a new instance in each iteration
+ (*m)[e.Name] = &context
+ }
+ return nil
+}
+
+// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func clientcmdNewConfig() *clientcmdConfig {
+ return &clientcmdConfig{
+ Clusters: make(map[string]*clientcmdCluster),
+ AuthInfos: make(map[string]*clientcmdAuthInfo),
+ Contexts: make(map[string]*clientcmdContext),
+ }
+}
+
+// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
+// Cluster contains information about how to communicate with a kubernetes cluster
+type clientcmdCluster struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // Server is the address of the kubernetes cluster (https://hostname:port).
+ Server string `json:"server"`
+ // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+ InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+ // CertificateAuthority is the path to a cert file for the certificate authority.
+ CertificateAuthority string `json:"certificate-authority,omitempty"`
+ // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+ CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+}
+
+// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type clientcmdAuthInfo struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // ClientCertificate is the path to a client cert file for TLS.
+ ClientCertificate string `json:"client-certificate,omitempty"`
+ // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+ ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+ // ClientKey is the path to a client key file for TLS.
+ ClientKey string `json:"client-key,omitempty"`
+ // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+ ClientKeyData []byte `json:"client-key-data,omitempty"`
+ // Token is the bearer token for authentication to the kubernetes cluster.
+ Token string `json:"token,omitempty"`
+ // Username is the username for basic authentication to the kubernetes cluster.
+ Username string `json:"username,omitempty"`
+ // Password is the password for basic authentication to the kubernetes cluster.
+ Password string `json:"password,omitempty"`
+}
+
+// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type clientcmdContext struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // Cluster is the name of the cluster for this context
+ Cluster string `json:"cluster"`
+ // AuthInfo is the name of the authInfo for this context
+ AuthInfo string `json:"user"`
+ // Namespace is the default namespace to use on unspecified requests
+ Namespace string `json:"namespace,omitempty"`
+}
+
+// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
+// NamedCluster relates nicknames to cluster information
+type v1NamedCluster struct {
+ // Name is the nickname for this Cluster
+ Name string `json:"name"`
+ // Cluster holds the cluster information
+ Cluster clientcmdCluster `json:"cluster"`
+}
+
+// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
+// NamedContext relates nicknames to context information
+type v1NamedContext struct {
+ // Name is the nickname for this Context
+ Name string `json:"name"`
+ // Context holds the context information
+ Context clientcmdContext `json:"context"`
+}
+
+// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
+// NamedAuthInfo relates nicknames to auth information
+type v1NamedAuthInfo struct {
+ // Name is the nickname for this AuthInfo
+ Name string `json:"name"`
+ // AuthInfo holds the auth information
+ AuthInfo clientcmdAuthInfo `json:"user"`
+}
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
new file mode 100644
index 000000000..794521b14
--- /dev/null
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -0,0 +1,539 @@
+package openshift
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/containers/image/docker"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/containers/image/version"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
+type openshiftClient struct {
+ ref openshiftReference
+ baseURL *url.URL
+ // Values from Kubernetes configuration
+ httpClient *http.Client
+ bearerToken string // "" if not used
+ username string // "" if not used
+ password string // if username != ""
+}
+
+// newOpenshiftClient creates a new openshiftClient for the specified reference.
+func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
+ // We have already done this parsing in ParseReference, but thrown away
+ // httpClient. So, parse again.
+ // (We could also rework/split restClientFor to "get base URL" to be done
+ // in ParseReference, and "get httpClient" to be done here. But until/unless
+ // we support non-default clusters, this is good enough.)
+
+ // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client.
+ cmdConfig := defaultClientConfig()
+ logrus.Debugf("cmdConfig: %#v", cmdConfig)
+ restConfig, err := cmdConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.)
+ logrus.Debugf("restConfig: %#v", restConfig)
+ baseURL, httpClient, err := restClientFor(restConfig)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("URL: %#v", *baseURL)
+
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
+
+ return &openshiftClient{
+ ref: ref,
+ baseURL: baseURL,
+ httpClient: httpClient,
+ bearerToken: restConfig.BearerToken,
+ username: restConfig.Username,
+ password: restConfig.Password,
+ }, nil
+}
+
+// doRequest performs a correctly authenticated request to a specified path, and returns the response body or an error.
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) {
+ url := *c.baseURL
+ url.Path = path
+ var requestBodyReader io.Reader
+ if requestBody != nil {
+ logrus.Debugf("Will send body: %s", requestBody)
+ requestBodyReader = bytes.NewReader(requestBody)
+ }
+ req, err := http.NewRequest(method, url.String(), requestBodyReader)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+
+ if len(c.bearerToken) != 0 {
+ req.Header.Set("Authorization", "Bearer "+c.bearerToken)
+ } else if len(c.username) != 0 {
+ req.SetBasicAuth(c.username, c.password)
+ }
+ req.Header.Set("Accept", "application/json, */*")
+ req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version))
+ if requestBody != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+
+ logrus.Debugf("%s %s", method, url)
+ res, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Got body: %s", body)
+ // FIXME: Just throwing this useful information away only to try to guess later...
+ logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type"))
+
+ var status status
+ statusValid := false
+ if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 {
+ statusValid = true
+ }
+
+ switch {
+ case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient.
+ if statusValid && status.Status != "Success" {
+ return nil, errors.New(status.Message)
+ }
+ case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent:
+ // OK.
+ default:
+ if statusValid {
+ return nil, errors.New(status.Message)
+ }
+ return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
+ }
+
+ return body, nil
+}
+
+// getImage loads the specified image object.
+func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
+ // FIXME: validate components per validation.IsValidPathSegmentName?
+ path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
+ body, err := c.doRequest(ctx, "GET", path, nil)
+ if err != nil {
+ return nil, err
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ var isi imageStreamImage
+ if err := json.Unmarshal(body, &isi); err != nil {
+ return nil, err
+ }
+ return &isi.Image, nil
+}
+
+// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
+// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
+func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
+ parts := strings.SplitN(ref, "/", 2)
+ if len(parts) != 2 {
+ return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref)
+ }
+ return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
+}
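+
+// For illustration (hypothetical values): if the cluster reports
+// "172.30.30.30:5000/ns/stream@sha256:<digest>" and the user-specified reference
+// has domain "registry.example.com", the result is
+// "registry.example.com/ns/stream@sha256:<digest>"; only the part before the
+// first "/" is replaced.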
+
+type openshiftImageSource struct {
+ client *openshiftClient
+ // Values specific to this image
+ ctx *types.SystemContext
+ // State
+ docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet
+ imageStreamImageName string // Resolved image identifier, or "" if not known yet
+}
+
+// newImageSource creates a new ImageSource for the specified reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx *types.SystemContext, ref openshiftReference) (types.ImageSource, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return &openshiftImageSource{
+ client: client,
+ ctx: ctx,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *openshiftImageSource) Reference() types.ImageReference {
+ return s.client.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *openshiftImageSource) Close() error {
+ if s.docker != nil {
+ err := s.docker.Close()
+ s.docker = nil
+
+ return err
+ }
+
+ return nil
+}
+
+func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ if err := s.ensureImageIsResolved(context.TODO()); err != nil {
+ return nil, "", err
+ }
+ return s.docker.GetTargetManifest(digest)
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *openshiftImageSource) GetManifest() ([]byte, string, error) {
+ if err := s.ensureImageIsResolved(context.TODO()); err != nil {
+ return nil, "", err
+ }
+ return s.docker.GetManifest()
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ if err := s.ensureImageIsResolved(context.TODO()); err != nil {
+ return nil, 0, err
+ }
+ return s.docker.GetBlob(info)
+}
+
+func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, err
+ }
+
+ image, err := s.client.getImage(ctx, s.imageStreamImageName)
+ if err != nil {
+ return nil, err
+ }
+ var sigs [][]byte
+ for _, sig := range image.Signatures {
+ if sig.Type == imageSignatureTypeAtomic {
+ sigs = append(sigs, sig.Content)
+ }
+ }
+ return sigs, nil
+}
+
+func (s *openshiftImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ return nil
+}
+
+// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
+func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
+ if s.docker != nil {
+ return nil
+ }
+
+ // FIXME: validate components per validation.IsValidPathSegmentName?
+ path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
+ body, err := s.client.doRequest(ctx, "GET", path, nil)
+ if err != nil {
+ return err
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ var is imageStream
+ if err := json.Unmarshal(body, &is); err != nil {
+ return err
+ }
+ var te *tagEvent
+ for _, tag := range is.Status.Tags {
+ if tag.Tag != s.client.ref.dockerReference.Tag() {
+ continue
+ }
+ if len(tag.Items) > 0 {
+ te = &tag.Items[0]
+ break
+ }
+ }
+ if te == nil {
+ return errors.Errorf("No matching tag found")
+ }
+ logrus.Debugf("tag event %#v", te)
+ dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Resolved reference %#v", dockerRefString)
+ dockerRef, err := docker.ParseReference("//" + dockerRefString)
+ if err != nil {
+ return err
+ }
+ d, err := dockerRef.NewImageSource(s.ctx)
+ if err != nil {
+ return err
+ }
+ s.docker = d
+ s.imageStreamImageName = te.Image
+ return nil
+}
+
+type openshiftImageDestination struct {
+ client *openshiftClient
+ docker types.ImageDestination // The Docker Registry endpoint
+ // State
+ imageStreamImageName string // "" if not yet known
+}
+
+// newImageDestination creates a new ImageDestination for the specified reference.
+func newImageDestination(ctx *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
+ // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
+ // the manifest digest at this point.
+ dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
+ dockerRef, err := docker.ParseReference(dockerRefString)
+ if err != nil {
+ return nil, err
+ }
+ docker, err := dockerRef.NewImageDestination(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return &openshiftImageDestination{
+ client: client,
+ docker: docker,
+ }, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+ return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+ return d.docker.Close()
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+ return d.docker.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *openshiftImageDestination) SupportsSignatures() error {
+ return nil
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *openshiftImageDestination) ShouldCompressLayers() bool {
+ return true
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ return d.docker.PutBlob(stream, inputInfo)
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ return d.docker.HasBlob(info)
+}
+
+func (d *openshiftImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return d.docker.ReapplyBlob(info)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(m []byte) error {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.imageStreamImageName = manifestDigest.String()
+
+ return d.docker.PutManifest(m)
+}
+
+func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
+ if d.imageStreamImageName == "" {
+ return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+ }
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures.
+
+ if len(signatures) == 0 {
+ return nil // No need to even read the old state.
+ }
+
+ image, err := d.client.getImage(context.TODO(), d.imageStreamImageName)
+ if err != nil {
+ return err
+ }
+ existingSigNames := map[string]struct{}{}
+ for _, sig := range image.Signatures {
+ existingSigNames[sig.objectMeta.Name] = struct{}{}
+ }
+
+sigExists:
+ for _, newSig := range signatures {
+ for _, existingSig := range image.Signatures {
+ if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+ continue sigExists
+ }
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return errors.Wrapf(err, "Error generating random signature name: read %d of 16 bytes", n)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
+ if _, ok := existingSigNames[signatureName]; !ok {
+ break
+ }
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ sig := imageSignature{
+ typeMeta: typeMeta{
+ Kind: "ImageSignature",
+ APIVersion: "v1",
+ },
+ objectMeta: objectMeta{Name: signatureName},
+ Type: imageSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+ _, err = d.client.doRequest(context.TODO(), "POST", "/oapi/v1/imagesignatures", body)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
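+
+// For illustration: the invented signature names have the form
+// "<manifest digest>@<32 hex digits>", where the 32 hex digits encode the 16
+// random bytes read above and the manifest digest is d.imageStreamImageName.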
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *openshiftImageDestination) Commit() error {
+ return d.docker.Commit()
+}
+
+// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
+type imageStream struct {
+ Status imageStreamStatus `json:"status,omitempty"`
+}
+type imageStreamStatus struct {
+ DockerImageRepository string `json:"dockerImageRepository"`
+ Tags []namedTagEventList `json:"tags,omitempty"`
+}
+type namedTagEventList struct {
+ Tag string `json:"tag"`
+ Items []tagEvent `json:"items"`
+}
+type tagEvent struct {
+ DockerImageReference string `json:"dockerImageReference"`
+ Image string `json:"image"`
+}
+type imageStreamImage struct {
+ Image image `json:"image"`
+}
+type image struct {
+ objectMeta `json:"metadata,omitempty"`
+ DockerImageReference string `json:"dockerImageReference,omitempty"`
+ // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"`
+ DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
+ DockerImageManifest string `json:"dockerImageManifest,omitempty"`
+ // DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
+ Signatures []imageSignature `json:"signatures,omitempty"`
+}
+
+const imageSignatureTypeAtomic string = "atomic"
+
+type imageSignature struct {
+ typeMeta `json:",inline"`
+ objectMeta `json:"metadata,omitempty"`
+ Type string `json:"type"`
+ Content []byte `json:"content"`
+ // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+ // ImageIdentity string `json:"imageIdentity,omitempty"`
+ // SignedClaims map[string]string `json:"signedClaims,omitempty"`
+ // Created *unversioned.Time `json:"created,omitempty"`
+ // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"`
+ // IssuedTo SignatureSubject `json:"issuedTo,omitempty"`
+}
+type typeMeta struct {
+ Kind string `json:"kind,omitempty"`
+ APIVersion string `json:"apiVersion,omitempty"`
+}
+type objectMeta struct {
+ Name string `json:"name,omitempty"`
+ GenerateName string `json:"generateName,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ SelfLink string `json:"selfLink,omitempty"`
+ ResourceVersion string `json:"resourceVersion,omitempty"`
+ Generation int64 `json:"generation,omitempty"`
+ DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status
+type status struct {
+ Status string `json:"status,omitempty"`
+ Message string `json:"message,omitempty"`
+ // Reason StatusReason `json:"reason,omitempty"`
+ // Details *StatusDetails `json:"details,omitempty"`
+ Code int32 `json:"code,omitempty"`
+}
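+
+// For illustration, an error body that would populate status could look
+// roughly like this (hypothetical message):
+//
+//	{"status": "Failure", "message": "imagestream not found", "code": 404}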
diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go
new file mode 100644
index 000000000..7db35d96e
--- /dev/null
+++ b/vendor/github.com/containers/image/openshift/openshift_transport.go
@@ -0,0 +1,155 @@
+package openshift
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/containers/image/docker/policyconfiguration"
+ "github.com/containers/image/docker/reference"
+ genericImage "github.com/containers/image/image"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OpenShift registry-hosted images.
+var Transport = openshiftTransport{}
+
+type openshiftTransport struct{}
+
+func (t openshiftTransport) Name() string {
+ return "atomic"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// Note that image names (imageNameRegexp) use the form namespace/stream:tag, while
+// this scope format is HOSTNAME/namespace/stream:tag or any parent prefix of it.
+// Keep this in sync with imageNameRegexp!
+var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
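+
+// For illustration, scopes accepted by scopeRegexp include (hypothetical names)
+// "registry.example.com", "registry.example.com/ns",
+// "registry.example.com/ns/stream" and "registry.example.com/ns/stream:tag",
+// while deeper paths such as "registry.example.com/a/b/c" do not match.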
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if scopeRegexp.FindStringIndex(scope) == nil {
+ return errors.Errorf("Invalid scope name %s", scope)
+ }
+ return nil
+}
+
+// openshiftReference is an ImageReference for OpenShift images.
+type openshiftReference struct {
+ dockerReference reference.NamedTagged
+ namespace string // Computed from dockerReference in advance.
+ stream string // Computed from dockerReference in advance.
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
+func ParseReference(ref string) (types.ImageReference, error) {
+ r, err := reference.ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
+ }
+ tagged, ok := r.(reference.NamedTagged)
+ if !ok {
+ return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
+ }
+ return NewReference(tagged)
+}
+
+// NewReference returns an OpenShift reference for a reference.NamedTagged
+func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
+ r := strings.SplitN(reference.Path(dockerRef), "/", 3)
+ if len(r) != 2 {
+ return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
+ reference.FamiliarString(dockerRef))
+ }
+ return openshiftReference{
+ namespace: r[0],
+ stream: r[1],
+ dockerReference: dockerRef,
+ }, nil
+}
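+
+// For illustration (hypothetical names):
+// ParseReference("registry.example.com/myns/mystream:v1") yields a reference
+// with namespace "myns" and stream "mystream"; an input without a tag, or with
+// more than two path components, is rejected.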
+
+func (ref openshiftReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref openshiftReference) StringWithinTransport() string {
+ return reference.FamiliarString(ref.dockerReference)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref openshiftReference) DockerReference() reference.Named {
+ return ref.dockerReference
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref openshiftReference) PolicyConfigurationIdentity() string {
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference)
+ if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+ }
+ return res
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
+ return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ src, err := newImageSource(ctx, ref)
+ if err != nil {
+ return nil, err
+ }
+ return genericImage.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref openshiftReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref openshiftReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error {
+ return errors.Errorf("Deleting images not implemented for atomic: images")
+}
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
new file mode 100644
index 000000000..26137431d
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -0,0 +1,332 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+
+ "github.com/ostreedev/ostree-go/pkg/otbuiltin"
+)
+
+type blobToImport struct {
+ Size int64
+ Digest digest.Digest
+ BlobPath string
+}
+
+type descriptor struct {
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+}
+
+type manifestSchema struct {
+ ConfigDescriptor descriptor `json:"config"`
+ LayersDescriptors []descriptor `json:"layers"`
+}
+
+type ostreeImageDestination struct {
+ ref ostreeReference
+ manifest string
+ schema manifestSchema
+ tmpDirPath string
+ blobs map[string]*blobToImport
+ digest digest.Digest
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing ostree.
+func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
+ tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
+ if err := ensureDirectoryExists(tmpDirPath); err != nil {
+ return nil, err
+ }
+ return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, ""}, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ostreeImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ostreeImageDestination) Close() error {
+ return os.RemoveAll(d.tmpDirPath)
+}
+
+func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
+ return []string{
+ manifest.DockerV2Schema2MediaType,
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *ostreeImageDestination) SupportsSignatures() error {
+ return nil
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *ostreeImageDestination) ShouldCompressLayers() bool {
+ return false
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
+ return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
+ return true
+}
+
+func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ blobPath := filepath.Join(tmpDir, "content")
+ blobFile, err := os.Create(blobPath)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer blobFile.Close()
+
+ digester := digest.Canonical.Digester()
+ tee := io.TeeReader(stream, digester.Hash())
+
+ size, err := io.Copy(blobFile, tee)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ computedDigest := digester.Digest()
+ if inputInfo.Size != -1 && size != inputInfo.Size {
+ return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ }
+ if err := blobFile.Sync(); err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ hash := computedDigest.Hex()
+ d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath}
+ return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+func fixFiles(dir string, usermode bool) error {
+ entries, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return err
+ }
+
+ for _, info := range entries {
+ fullpath := filepath.Join(dir, info.Name())
+ if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+ if err := os.Remove(fullpath); err != nil {
+ return err
+ }
+ continue
+ }
+ if info.IsDir() {
+ if usermode {
+ if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
+ return err
+ }
+ }
+ err = fixFiles(fullpath, usermode)
+ if err != nil {
+ return err
+ }
+ } else if usermode && (info.Mode().IsRegular()) {
+ if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error {
+ opts := otbuiltin.NewCommitOptions()
+ opts.AddMetadataString = metadata
+ opts.Timestamp = time.Now()
+ // OCI layers have no parent OSTree commit
+ opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
+ _, err := repo.Commit(root, branch, opts)
+ return err
+}
+
+func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error {
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+ destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+ if err := ensureDirectoryExists(destinationPath); err != nil {
+ return err
+ }
+ defer func() {
+ os.Remove(blob.BlobPath)
+ os.RemoveAll(destinationPath)
+ }()
+
+ if os.Getuid() == 0 {
+ if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
+ return err
+ }
+ if err := fixFiles(destinationPath, false); err != nil {
+ return err
+ }
+ } else {
+ if err := os.MkdirAll(destinationPath, 0755); err != nil {
+ return err
+ }
+ if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
+ return err
+ }
+
+ if err := fixFiles(destinationPath, true); err != nil {
+ return err
+ }
+ }
+ return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
+}
+
+func (d *ostreeImageDestination) importConfig(blob *blobToImport) error {
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+
+ return exec.Command("ostree", "commit",
+ "--repo", d.ref.repo,
+ fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size),
+ "--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run()
+}
+
+func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+ output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput()
+ if err != nil {
+ if bytes.Contains(output, []byte("not found")) || bytes.Contains(output, []byte("No such")) {
+ return false, -1, nil
+ }
+ return false, -1, err
+ }
+ size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64)
+ if err != nil {
+ return false, -1, err
+ }
+
+ return true, size, nil
+}
+
+func (d *ostreeImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return info, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ostreeImageDestination) PutManifest(manifestBlob []byte) error {
+ d.manifest = string(manifestBlob)
+
+ if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
+ return err
+ }
+
+ manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
+ if err := ensureParentDirectoryExists(manifestPath); err != nil {
+ return err
+ }
+
+ digest, err := manifest.Digest(manifestBlob)
+ if err != nil {
+ return err
+ }
+ d.digest = digest
+
+ return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+}
+
+func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error {
+ path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
+ if err := ensureParentDirectoryExists(path); err != nil {
+ return err
+ }
+
+ for i, sig := range signatures {
+ signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
+ if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *ostreeImageDestination) Commit() error {
+ repo, err := otbuiltin.OpenRepo(d.ref.repo)
+ if err != nil {
+ return err
+ }
+
+ _, err = repo.PrepareTransaction()
+ if err != nil {
+ return err
+ }
+
+ for _, layer := range d.schema.LayersDescriptors {
+ hash := layer.Digest.Hex()
+ blob := d.blobs[hash]
+ // if the blob is not present in d.blobs then it is already stored in OSTree,
+ // and we don't need to import it.
+ if blob == nil {
+ continue
+ }
+ err := d.importBlob(repo, blob)
+ if err != nil {
+ return err
+ }
+ }
+
+ hash := d.schema.ConfigDescriptor.Digest.Hex()
+ blob := d.blobs[hash]
+ if blob != nil {
+ err := d.importConfig(blob)
+ if err != nil {
+ return err
+ }
+ }
+
+ manifestPath := filepath.Join(d.tmpDirPath, "manifest")
+
+ metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), fmt.Sprintf("docker.digest=%s", string(d.digest))}
+ err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
+ if err != nil {
+ return err
+ }
+
+ _, err = repo.CommitTransaction()
+ return err
+}
+
+func ensureDirectoryExists(path string) error {
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ensureParentDirectoryExists(path string) error {
+ return ensureDirectoryExists(filepath.Dir(path))
+}
diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go
new file mode 100644
index 000000000..0de74a71d
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_transport.go
@@ -0,0 +1,226 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/directory/explicitfilepath"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+)
+
+const defaultOSTreeRepo = "/ostree/repo"
+
+// Transport is an ImageTransport for ostree paths.
+var Transport = ostreeTransport{}
+
+type ostreeTransport struct{}
+
+func (t ostreeTransport) Name() string {
+ return "ostree"
+}
+
+func init() {
+ transports.Register(Transport)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
+ sep := strings.Index(scope, ":")
+ if sep < 0 {
+ return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+ }
+ repo := scope[:sep]
+
+ if !strings.HasPrefix(repo, "/") {
+ return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+ }
+ cleaned := filepath.Clean(repo)
+ if cleaned != repo {
+ return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ }
+
+ // FIXME? In the namespaces within a repo,
+ // we could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// ostreeReference is an ImageReference for ostree paths.
+type ostreeReference struct {
+ image string
+ branchName string
+ repo string
+}
+
+func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
+ var repo, image string
+ s := strings.SplitN(ref, "@/", 2)
+ if len(s) == 1 {
+ image, repo = s[0], defaultOSTreeRepo
+ } else {
+ image, repo = s[0], "/"+s[1]
+ }
+
+ return NewReference(image, repo)
+}
+
+// NewReference returns an OSTree reference for a specified repo and image.
+func NewReference(image string, repo string) (types.ImageReference, error) {
+ // image is not _really_ in a containers/image/docker/reference format;
+ // as far as the libOSTree ociimage/* namespace is concerned, it is more or
+ // less an arbitrary string with an implied tag.
+ // Parse the image using reference.ParseNormalizedNamed so that we can
+ // check whether the image has a tag specified, and add ":latest" if needed
+ ostreeImage, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return nil, err
+ }
+
+ if reference.IsNameOnly(ostreeImage) {
+ image = image + ":latest"
+ }
+
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
+ if err != nil {
+ // With os.IsNotExist(err), the parent directory of repo does not exist either;
+ // that should ordinarily not happen, but it would be a bit weird to reject
+ // references which do not specify a repo just because the implicit defaultOSTreeRepo
+ // does not exist.
+ if os.IsNotExist(err) && repo == defaultOSTreeRepo {
+ resolved = repo
+ } else {
+ return nil, err
+ }
+ }
+ // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+ // from being ambiguous with values of PolicyConfigurationIdentity.
+ if strings.Contains(resolved, ":") {
+ return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved)
+ }
+
+ return ostreeReference{
+ image: image,
+ branchName: encodeOStreeRef(image),
+ repo: resolved,
+ }, nil
+}
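+
+// For illustration (hypothetical paths, assuming /srv/repo exists):
+// ParseReference("busybox@/srv/repo") yields image "busybox:latest" (the tag is
+// defaulted), branchName "busybox_3Alatest" and repo "/srv/repo", while
+// ParseReference("busybox") falls back to the default repo /ostree/repo.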
+
+func (ref ostreeReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ostreeReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s@%s", ref.image, ref.repo)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ostreeReference) DockerReference() reference.Named {
+ return nil
+}
+
+func (ref ostreeReference) PolicyConfigurationIdentity() string {
+ return fmt.Sprintf("%s:%s", ref.repo, ref.image)
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
+ s := strings.SplitN(ref.image, ":", 2)
+ if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
+ panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
+ }
+ name := s[0]
+ res := []string{}
+ for {
+ res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
+
+ lastSlash := strings.LastIndex(name, "/")
+ if lastSlash == -1 {
+ break
+ }
+ name = name[:lastSlash]
+ }
+ return res
+}
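+
+// For illustration (hypothetical values): with repo "/ostree/repo" and image
+// "example.com/foo/bar:latest", the returned namespaces are
+//
+//	/ostree/repo:example.com/foo/bar
+//	/ostree/repo:example.com/foo
+//	/ostree/repo:example.com
+//
+// i.e. the tag is dropped and path components are peeled off right to left.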
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ return nil, errors.New("Reading ostree: images is currently not supported")
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return nil, errors.New("Reading ostree: images is currently not supported")
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ostreeReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ var tmpDir string
+ if ctx == nil || ctx.OSTreeTmpDirPath == "" {
+ tmpDir = os.TempDir()
+ } else {
+ tmpDir = ctx.OSTreeTmpDirPath
+ }
+ return newImageDestination(ref, tmpDir)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ostreeReference) DeleteImage(ctx *types.SystemContext) error {
+ return errors.Errorf("Deleting images not implemented for ostree: images")
+}
+
+var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
+
+func encodeOStreeRef(in string) string {
+ var buffer bytes.Buffer
+ for i := range in {
+ sub := in[i : i+1]
+ if ostreeRefRegexp.MatchString(sub) {
+ buffer.WriteString(sub)
+ } else {
+ buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
+ }
+ }
+ return buffer.String()
+}
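+
+// For illustration: encodeOStreeRef("docker.io/library/busybox:latest") returns
+// "docker.io_2Flibrary_2Fbusybox_3Alatest"; every byte outside [A-Za-z0-9.-] is
+// escaped as "_" followed by its two-digit hex value.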
+
+// manifestPath returns a path for the manifest within a ostree using our conventions.
+func (ref ostreeReference) manifestPath() string {
+ return filepath.Join("manifest", "manifest.json")
+}
+
+// signaturePath returns a path for a signature within a ostree using our conventions.
+func (ref ostreeReference) signaturePath(index int) string {
+ return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
+}
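+
+// For illustration: the manifest is stored at "manifest/manifest.json" and
+// signatures next to it at "manifest/signature-1", "manifest/signature-2", and
+// so on; note that signaturePath(0) maps to "signature-1", so the on-disk
+// indices are 1-based.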
diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go
new file mode 100644
index 000000000..c19d962ee
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/compression/compression.go
@@ -0,0 +1,67 @@
+package compression
+
+import (
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "io"
+
+ "github.com/pkg/errors"
+
+ "github.com/sirupsen/logrus"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+type DecompressorFunc func(io.Reader) (io.Reader, error)
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.Reader, error) {
+ return gzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.Reader, error) {
+ return bzip2.NewReader(r), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.Reader, error) {
+ return nil, errors.New("Decompressing xz streams is not supported")
+}
+
+// compressionAlgos is an internal implementation detail of DetectCompression
+var compressionAlgos = map[string]struct {
+ prefix []byte
+ decompressor DecompressorFunc
+}{
+ "gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952)
+ "bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress)
+ "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+ buffer := [8]byte{}
+
+ n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+ // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+ return nil, nil, err
+ }
+
+ var decompressor DecompressorFunc
+ for name, algo := range compressionAlgos {
+ if bytes.HasPrefix(buffer[:n], algo.prefix) {
+ logrus.Debugf("Detected compression format %s", name)
+ decompressor = algo.decompressor
+ break
+ }
+ }
+ if decompressor == nil {
+ logrus.Debugf("No compression detected")
+ }
+
+ return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
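+
+// For illustration, a minimal caller sketch; since DetectCompression consumes
+// the start of input, later reads must use the returned reader:
+//
+//	decompressor, reader, err := DetectCompression(input)
+//	if err != nil {
+//		return err
+//	}
+//	if decompressor != nil {
+//		if reader, err = decompressor(reader); err != nil {
+//			return err
+//		}
+//	}
+//	// read the (possibly decompressed) payload from reader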
diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go
new file mode 100644
index 000000000..fd0ae7d84
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/docker/config/config.go
@@ -0,0 +1,295 @@
+package config
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/containers/image/types"
+ helperclient "github.com/docker/docker-credential-helpers/client"
+ "github.com/docker/docker-credential-helpers/credentials"
+ "github.com/docker/docker/pkg/homedir"
+ "github.com/pkg/errors"
+)
+
+type dockerAuthConfig struct {
+ Auth string `json:"auth,omitempty"`
+}
+
+type dockerConfigFile struct {
+ AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+ CredHelpers map[string]string `json:"credHelpers,omitempty"`
+}
+
+const (
+ defaultPath = "/run/user"
+ authCfg = "containers"
+ authCfgFileName = "auth.json"
+ dockerCfg = ".docker"
+ dockerCfgFileName = "config.json"
+ dockerLegacyCfg = ".dockercfg"
+)
+
+var (
+ // ErrNotLoggedIn is returned for users not logged into a registry
+ // that they are trying to log out of
+ ErrNotLoggedIn = errors.New("not logged in")
+)
+
+// SetAuthentication stores the username and password in the auth.json file
+func SetAuthentication(ctx *types.SystemContext, registry, username, password string) error {
+ return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) {
+ if ch, exists := auths.CredHelpers[registry]; exists {
+ return false, setAuthToCredHelper(ch, registry, username, password)
+ }
+
+ creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+ newCreds := dockerAuthConfig{Auth: creds}
+ auths.AuthConfigs[registry] = newCreds
+ return true, nil
+ })
+}
+
+// GetAuthentication returns the registry credentials stored in
+// either the auth.json file or .docker/config.json
+// If an entry is not found, empty strings are returned for the username and password
+func GetAuthentication(ctx *types.SystemContext, registry string) (string, string, error) {
+ if ctx != nil && ctx.DockerAuthConfig != nil {
+ return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil
+ }
+
+ dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyCfg)
+ paths := [3]string{getPathToAuth(ctx), filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath}
+
+ for _, path := range paths {
+ legacyFormat := path == dockerLegacyPath
+ username, password, err := findAuthentication(registry, path, legacyFormat)
+ if err != nil {
+ return "", "", err
+ }
+ if username != "" && password != "" {
+ return username, password, nil
+ }
+ }
+ return "", "", nil
+}
+
+// GetUserLoggedIn returns the username logged in to the registry, read from either
+// auth.json or XDG_RUNTIME_DIR
+// Used to tell the user if someone is already logged in to the registry when logging in
+func GetUserLoggedIn(ctx *types.SystemContext, registry string) string {
+ path := getPathToAuth(ctx)
+ username, _, _ := findAuthentication(registry, path, false)
+ if username != "" {
+ return username
+ }
+ return ""
+}
+
+// RemoveAuthentication deletes the credentials stored in auth.json
+func RemoveAuthentication(ctx *types.SystemContext, registry string) error {
+ return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) {
+ // First try cred helpers.
+ if ch, exists := auths.CredHelpers[registry]; exists {
+ return false, deleteAuthFromCredHelper(ch, registry)
+ }
+
+ if _, ok := auths.AuthConfigs[registry]; ok {
+ delete(auths.AuthConfigs, registry)
+ } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
+ delete(auths.AuthConfigs, normalizeRegistry(registry))
+ } else {
+ return false, ErrNotLoggedIn
+ }
+ return true, nil
+ })
+}
+
+// RemoveAllAuthentication deletes all the credentials stored in auth.json
+func RemoveAllAuthentication(ctx *types.SystemContext) error {
+ return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) {
+ auths.CredHelpers = make(map[string]string)
+ auths.AuthConfigs = make(map[string]dockerAuthConfig)
+ return true, nil
+ })
+}
+
+// getPathToAuth gets the path of the auth.json file
+// The path can be overridden by the user if the overwrite-path flag is set
+// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
+// Otherwise, the auth.json file is stored in /run/user/UID/containers
+func getPathToAuth(ctx *types.SystemContext) string {
+ if ctx != nil {
+ if ctx.AuthFilePath != "" {
+ return ctx.AuthFilePath
+ }
+ if ctx.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(ctx.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName)
+ }
+ }
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+ if runtimeDir == "" {
+ runtimeDir = filepath.Join(defaultPath, strconv.Itoa(os.Getuid()))
+ }
+ return filepath.Join(runtimeDir, authCfg, authCfgFileName)
+}
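+
+// For illustration (hypothetical uid): with XDG_RUNTIME_DIR=/run/user/1000 set,
+// the result is /run/user/1000/containers/auth.json; with the variable unset
+// and uid 1000, the same path is derived from the /run/user default.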
+
+// readJSONFile unmarshals the authentications stored in the auth.json file and returns it
+// or returns an empty dockerConfigFile data structure if auth.json does not exist
+// if the file exists and is empty, readJSONFile returns an error
+func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
+ var auths dockerConfigFile
+
+ raw, err := ioutil.ReadFile(path)
+ if os.IsNotExist(err) {
+ auths.AuthConfigs = map[string]dockerAuthConfig{}
+ return auths, nil
+ }
+
+ if legacyFormat {
+ if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
+ return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+ }
+ return auths, nil
+ }
+
+ if err = json.Unmarshal(raw, &auths); err != nil {
+ return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+ }
+
+ return auths, nil
+}
+
+// modifyJSON writes to auth.json if the dockerConfigFile has been updated
+func modifyJSON(ctx *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
+ path := getPathToAuth(ctx)
+ dir := filepath.Dir(path)
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+ if err = os.Mkdir(dir, 0700); err != nil {
+ return errors.Wrapf(err, "error creating directory %q", dir)
+ }
+ }
+
+ auths, err := readJSONFile(path, false)
+ if err != nil {
+ return errors.Wrapf(err, "error reading JSON file %q", path)
+ }
+
+ updated, err := editor(&auths)
+ if err != nil {
+ return errors.Wrapf(err, "error updating %q", path)
+ }
+ if updated {
+ newData, err := json.MarshalIndent(auths, "", "\t")
+ if err != nil {
+ return errors.Wrapf(err, "error marshaling JSON %q", path)
+ }
+
+ // auth.json contains credentials; restrict it to the owner
+ if err = ioutil.WriteFile(path, newData, 0600); err != nil {
+ return errors.Wrapf(err, "error writing to file %q", path)
+ }
+ }
+
+ return nil
+}
+
+func getAuthFromCredHelper(credHelper, registry string) (string, string, error) {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ creds, err := helperclient.Get(p, registry)
+ if err != nil {
+ return "", "", err
+ }
+ return creds.Username, creds.Secret, nil
+}
+
+func setAuthToCredHelper(credHelper, registry, username, password string) error {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ creds := &credentials.Credentials{
+ ServerURL: registry,
+ Username: username,
+ Secret: password,
+ }
+ return helperclient.Store(p, creds)
+}
+
+func deleteAuthFromCredHelper(credHelper, registry string) error {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ return helperclient.Erase(p, registry)
+}
+
+// findAuthentication looks for auth of registry in path
+func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) {
+ auths, err := readJSONFile(path, legacyFormat)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error reading JSON file %q", path)
+ }
+
+ // First try cred helpers. They should always be normalized.
+ if ch, exists := auths.CredHelpers[registry]; exists {
+ return getAuthFromCredHelper(ch, registry)
+ }
+
+ // Fast path: try the registry name exactly as the caller provided it
+ if val, exists := auths.AuthConfigs[registry]; exists {
+ return decodeDockerAuth(val.Auth)
+ }
+
+ // No exact match; normalize both the lookup key and the stored entries, then retry
+ registry = normalizeRegistry(registry)
+ normalizedAuths := map[string]dockerAuthConfig{}
+ for k, v := range auths.AuthConfigs {
+ normalizedAuths[normalizeRegistry(k)] = v
+ }
+ if val, exists := normalizedAuths[registry]; exists {
+ return decodeDockerAuth(val.Auth)
+ }
+ return "", "", nil
+}
+
+func decodeDockerAuth(s string) (string, string, error) {
+ decoded, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return "", "", err
+ }
+ parts := strings.SplitN(string(decoded), ":", 2)
+ if len(parts) != 2 {
+ // if it's invalid just skip, as docker does
+ return "", "", nil
+ }
+ user := parts[0]
+ password := strings.Trim(parts[1], "\x00")
+ return user, password, nil
+}
+
+// convertToHostname converts a registry URL which has http|https prepended
+// to just a hostname.
+// Copied from github.com/docker/docker/registry/auth.go
+func convertToHostname(url string) string {
+ stripped := url
+ if strings.HasPrefix(url, "http://") {
+ stripped = strings.TrimPrefix(url, "http://")
+ } else if strings.HasPrefix(url, "https://") {
+ stripped = strings.TrimPrefix(url, "https://")
+ }
+
+ nameParts := strings.SplitN(stripped, "/", 2)
+
+ return nameParts[0]
+}
+
+func normalizeRegistry(registry string) string {
+ normalized := convertToHostname(registry)
+ switch normalized {
+ case "registry-1.docker.io", "docker.io":
+ return "index.docker.io"
+ }
+ return normalized
+}
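+
+// Example (illustrative sketch, not part of this file's API): combining the
+// helpers above to log a user out of a registry; the registry name is a
+// placeholder:
+//
+//	ctx := &types.SystemContext{}
+//	if user := GetUserLoggedIn(ctx, "docker.io"); user != "" {
+//		if err := RemoveAuthentication(ctx, "docker.io"); err != nil {
+//			// handle the error; ErrNotLoggedIn is possible
+//		}
+//	}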
diff --git a/vendor/github.com/containers/image/pkg/strslice/README.md b/vendor/github.com/containers/image/pkg/strslice/README.md
new file mode 100644
index 000000000..ae6097e82
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/strslice/README.md
@@ -0,0 +1 @@
+This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice.go b/vendor/github.com/containers/image/pkg/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
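+
+// Example (illustrative): both JSON encodings decode to equivalent slices:
+//
+//	var a, b StrSlice
+//	_ = json.Unmarshal([]byte(`"echo hi"`), &a)      // a == StrSlice{"echo hi"}
+//	_ = json.Unmarshal([]byte(`["echo", "hi"]`), &b) // b == StrSlice{"echo", "hi"}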
diff --git a/vendor/github.com/containers/image/pkg/sysregistries/system_registries.go b/vendor/github.com/containers/image/pkg/sysregistries/system_registries.go
new file mode 100644
index 000000000..e5564a2a2
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/sysregistries/system_registries.go
@@ -0,0 +1,86 @@
+package sysregistries
+
+import (
+ "github.com/BurntSushi/toml"
+ "github.com/containers/image/types"
+ "io/ioutil"
+ "path/filepath"
+)
+
+// systemRegistriesConfPath is the path to the system-wide registry configuration file
+// and is used to add/subtract potential registries for obtaining images.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/pkg/sysregistries.systemRegistriesConfPath=$your_path'
+var systemRegistriesConfPath = builtinRegistriesConfPath
+
+// builtinRegistriesConfPath is the path to registry configuration file
+// DO NOT change this, instead see systemRegistriesConfPath above.
+const builtinRegistriesConfPath = "/etc/containers/registries.conf"
+
+type registries struct {
+ Registries []string `toml:"registries"`
+}
+
+type tomlConfig struct {
+ Registries struct {
+ Search registries `toml:"search"`
+ Insecure registries `toml:"insecure"`
+ Block registries `toml:"block"`
+ } `toml:"registries"`
+}
+
+// readRegistryConf reads the global registry file from the filesystem
+// and returns its contents as a byte slice.
+func readRegistryConf(ctx *types.SystemContext) ([]byte, error) {
+ dirPath := systemRegistriesConfPath
+ if ctx != nil {
+ if ctx.SystemRegistriesConfPath != "" {
+ dirPath = ctx.SystemRegistriesConfPath
+ } else if ctx.RootForImplicitAbsolutePaths != "" {
+ dirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
+ }
+ }
+ configBytes, err := ioutil.ReadFile(dirPath)
+ return configBytes, err
+}
+
+// For mocking in unit tests.
+var readConf = readRegistryConf
+
+// loadRegistryConf loads the registry configuration file from the filesystem
+// and unmarshals it, returning the parsed configuration.
+func loadRegistryConf(ctx *types.SystemContext) (*tomlConfig, error) {
+ config := &tomlConfig{}
+
+ configBytes, err := readConf(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = toml.Unmarshal(configBytes, config)
+ return config, err
+}
+
+// GetRegistries returns an array of strings that contain the names
+// of the registries as defined in the system-wide
+// registries file. It returns an empty array if none are
+// defined.
+func GetRegistries(ctx *types.SystemContext) ([]string, error) {
+ config, err := loadRegistryConf(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config.Registries.Search.Registries, nil
+}
+
+// GetInsecureRegistries returns an array of strings that contain the names
+// of the insecure registries as defined in the system-wide
+// registries file. It returns an empty array if none are
+// defined.
+func GetInsecureRegistries(ctx *types.SystemContext) ([]string, error) {
+ config, err := loadRegistryConf(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config.Registries.Insecure.Registries, nil
+}
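+
+// Example (illustrative): a registries.conf in the schema parsed above:
+//
+//	[registries.search]
+//	registries = ['docker.io', 'quay.io']
+//
+//	[registries.insecure]
+//	registries = ['localhost:5000']
+//
+// With that file, GetRegistries returns ["docker.io", "quay.io"] and
+// GetInsecureRegistries returns ["localhost:5000"].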
diff --git a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go
new file mode 100644
index 000000000..0a32861ce
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go
@@ -0,0 +1,102 @@
+package tlsclientconfig
+
+import (
+ "crypto/tls"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/docker/go-connections/sockets"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
+func SetupCertificates(dir string, tlsc *tls.Config) error {
+ logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
+ fs, err := ioutil.ReadDir(dir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ if os.IsPermission(err) {
+ logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
+ return nil
+ }
+ return err
+ }
+
+ for _, f := range fs {
+ fullPath := filepath.Join(dir, f.Name())
+ if strings.HasSuffix(f.Name(), ".crt") {
+ // Initialize RootCAs from the system pool only once, so certificates
+ // appended for earlier .crt files are not discarded on later iterations.
+ if tlsc.RootCAs == nil {
+ systemPool, err := tlsconfig.SystemCertPool()
+ if err != nil {
+ return errors.Wrap(err, "unable to get system cert pool")
+ }
+ tlsc.RootCAs = systemPool
+ }
+ logrus.Debugf(" crt: %s", fullPath)
+ data, err := ioutil.ReadFile(fullPath)
+ if err != nil {
+ return err
+ }
+ tlsc.RootCAs.AppendCertsFromPEM(data)
+ }
+ if strings.HasSuffix(f.Name(), ".cert") {
+ certName := f.Name()
+ keyName := certName[:len(certName)-5] + ".key"
+ logrus.Debugf(" cert: %s", fullPath)
+ if !hasFile(fs, keyName) {
+ return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+ }
+ cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+ if err != nil {
+ return err
+ }
+ tlsc.Certificates = append(tlsc.Certificates, cert)
+ }
+ if strings.HasSuffix(f.Name(), ".key") {
+ keyName := f.Name()
+ certName := keyName[:len(keyName)-4] + ".cert"
+ logrus.Debugf(" key: %s", fullPath)
+ if !hasFile(fs, certName) {
+ return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+ }
+ }
+ }
+ return nil
+}
+
+func hasFile(files []os.FileInfo, name string) bool {
+ for _, f := range files {
+ if f.Name() == name {
+ return true
+ }
+ }
+ return false
+}
+
+// NewTransport creates a default transport.
+func NewTransport() *http.Transport {
+ direct := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: direct.Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ // TODO(dmcgowan): Call close idle connections when complete and use keep alive
+ DisableKeepAlives: true,
+ }
+ proxyDialer, err := sockets.DialerFromEnvironment(direct)
+ if err == nil {
+ tr.Dial = proxyDialer.Dial
+ }
+ return tr
+}
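+
+// Example (illustrative; the certificate directory is a placeholder): wiring
+// the helpers above into an http.Client:
+//
+//	tlsc := &tls.Config{}
+//	if err := SetupCertificates("/etc/docker/certs.d/my-registry:5000", tlsc); err != nil {
+//		// handle the error
+//	}
+//	tr := NewTransport()
+//	tr.TLSClientConfig = tlsc
+//	client := &http.Client{Transport: tr}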
diff --git a/vendor/github.com/containers/image/signature/docker.go b/vendor/github.com/containers/image/signature/docker.go
new file mode 100644
index 000000000..16eb3f799
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/docker.go
@@ -0,0 +1,65 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/opencontainers/go-digest"
+)
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return nil, err
+ }
+ sig := newUntrustedSignature(manifestDigest, dockerReference)
+ return sig.sign(mech, keyIdentity)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+ expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+ expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+ if err != nil {
+ return nil, err
+ }
+ sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+ validateKeyIdentity: func(keyIdentity string) error {
+ if keyIdentity != expectedKeyIdentity {
+ return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
+ }
+ return nil
+ },
+ validateSignedDockerReference: func(signedDockerReference string) error {
+ signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+ if err != nil {
+ return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)}
+ }
+ if signedRef.String() != expectedRef.String() {
+ return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
+ signedDockerReference, expectedDockerReference)}
+ }
+ return nil
+ },
+ validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+ matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+ if err != nil {
+ return err
+ }
+ if !matches {
+ return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return sig, nil
+}
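+
+// Example (illustrative; manifestBlob, the reference, and the key fingerprint
+// are placeholders):
+//
+//	mech, err := NewGPGSigningMechanism()
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer mech.Close()
+//	sig, err := SignDockerManifest(manifestBlob, "docker.io/library/busybox:latest", mech, "<fingerprint>")
+//	// A verifier with the same expectations would then call:
+//	_, err = VerifyDockerManifestSignature(sig, manifestBlob, "docker.io/library/busybox:latest", mech, "<fingerprint>")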
diff --git a/vendor/github.com/containers/image/signature/json.go b/vendor/github.com/containers/image/signature/json.go
new file mode 100644
index 000000000..9e592863d
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/json.go
@@ -0,0 +1,88 @@
+package signature
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// jsonFormatError is returned when JSON does not match expected format.
+type jsonFormatError string
+
+func (err jsonFormatError) Error() string {
+ return string(err)
+}
+
+// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but fails on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
+// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
+ seenKeys := map[string]struct{}{}
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return jsonFormatError(err.Error())
+ }
+ if t != json.Delim('{') {
+ return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+ }
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ return jsonFormatError(err.Error())
+ }
+ if t == json.Delim('}') {
+ break
+ }
+
+ key, ok := t.(string)
+ if !ok {
+ // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+ return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+ }
+ if _, ok := seenKeys[key]; ok {
+ return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+ }
+ seenKeys[key] = struct{}{}
+
+ valuePtr := fieldResolver(key)
+ if valuePtr == nil {
+ return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+ }
+ // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+ if err := dec.Decode(valuePtr); err != nil {
+ return jsonFormatError(err.Error())
+ }
+ }
+ if _, err := dec.Token(); err != io.EOF {
+ return jsonFormatError("Unexpected data after JSON object")
+ }
+ return nil
+}
+
+// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but fails on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
+func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
+ seenKeys := map[string]struct{}{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if valuePtr, ok := exactFields[key]; ok {
+ seenKeys[key] = struct{}{}
+ return valuePtr
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ for key := range exactFields {
+ if _, ok := seenKeys[key]; !ok {
+ return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+ }
+ }
+ return nil
+}
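+
+// Example (illustrative): strict decoding of an object with exactly two keys:
+//
+//	var a string
+//	var b int
+//	err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+//		"a": &a,
+//		"b": &b,
+//	})
+//	// err is non-nil if "a" or "b" is missing or duplicated, or if any
+//	// other key is present.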
diff --git a/vendor/github.com/containers/image/signature/mechanism.go b/vendor/github.com/containers/image/signature/mechanism.go
new file mode 100644
index 000000000..bdf26c531
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism.go
@@ -0,0 +1,85 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "golang.org/x/crypto/openpgp"
+)
+
+// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
+// Each mechanism should eventually be closed by calling Close().
+// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
+// eliminate ambiguities, support CA signatures and perhaps other key properties)
+type SigningMechanism interface {
+ // Close removes resources associated with the mechanism, if any.
+ Close() error
+ // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+ SupportsSigning() error
+ // Sign creates a (non-detached) signature of input using keyIdentity.
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing.
+ Sign(input []byte, keyIdentity string) ([]byte, error)
+ // Verify parses unverifiedSignature and returns the content and the signer's identity
+ Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
+ // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+ // along with a short identifier of the key used for signing.
+ // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+ // is NOT the same as a "key identity" used in other calls to this interface, and
+ // the values may have no recognizable relationship if the public key is not available.
+ UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
+}
+
+// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
+type SigningNotSupportedError string
+
+func (err SigningNotSupportedError) Error() string {
+ return string(err)
+}
+
+// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg)
+// The caller must call .Close() on the returned SigningMechanism.
+func NewGPGSigningMechanism() (SigningMechanism, error) {
+ return newGPGSigningMechanismInDirectory("")
+}
+
+// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ return newEphemeralGPGSigningMechanism(blob)
+}
+
+// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
+ md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("The input is not a signature")
+ }
+ content, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: An error during reading the body can happen only if
+ // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
+ // to decrypt the contents anyway), or
+ // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
+ return nil, "", err
+ }
+
+ // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
+ // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
+ return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil
+}
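+
+// Example (illustrative; keyBlob and sigBlob are placeholders): verifying a
+// signature against public keys supplied at run time:
+//
+//	mech, keyIdentities, err := NewEphemeralGPGSigningMechanism(keyBlob)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer mech.Close()
+//	contents, keyIdentity, err := mech.Verify(sigBlob)
+//	// Accept contents only if keyIdentity is one of keyIdentities.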
diff --git a/vendor/github.com/containers/image/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/signature/mechanism_gpgme.go
new file mode 100644
index 000000000..4825ab27c
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism_gpgme.go
@@ -0,0 +1,175 @@
+// +build !containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/mtrmac/gpgme"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using gpgme.
+type gpgmeSigningMechanism struct {
+ ctx *gpgme.Context
+ ephemeralDir string // If not "", a directory to be removed on Close()
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+ ctx, err := newGPGMEContext(optionalDir)
+ if err != nil {
+ return nil, err
+ }
+ return &gpgmeSigningMechanism{
+ ctx: ctx,
+ ephemeralDir: "",
+ }, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
+ if err != nil {
+ return nil, nil, err
+ }
+ removeDir := true
+ defer func() {
+ if removeDir {
+ os.RemoveAll(dir)
+ }
+ }()
+ ctx, err := newGPGMEContext(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ mech := &gpgmeSigningMechanism{
+ ctx: ctx,
+ ephemeralDir: dir,
+ }
+ keyIdentities, err := mech.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ removeDir = false
+ return mech, keyIdentities, nil
+}
+
+// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty.
+func newGPGMEContext(optionalDir string) (*gpgme.Context, error) {
+ ctx, err := gpgme.New()
+ if err != nil {
+ return nil, err
+ }
+ if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
+ return nil, err
+ }
+ if optionalDir != "" {
+ err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
+ if err != nil {
+ return nil, err
+ }
+ }
+ ctx.SetArmor(false)
+ ctx.SetTextMode(false)
+ return ctx, nil
+}
+
+func (m *gpgmeSigningMechanism) Close() error {
+ if m.ephemeralDir != "" {
+ os.RemoveAll(m.ephemeralDir) // Ignore an error, if any
+ }
+ return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism);
+// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism.
+func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+ inputData, err := gpgme.NewDataBytes(blob)
+ if err != nil {
+ return nil, err
+ }
+ res, err := m.ctx.Import(inputData)
+ if err != nil {
+ return nil, err
+ }
+ keyIdentities := []string{}
+ for _, i := range res.Imports {
+ if i.Result == nil {
+ keyIdentities = append(keyIdentities, i.Fingerprint)
+ }
+ }
+ return keyIdentities, nil
+}
+
+// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+func (m *gpgmeSigningMechanism) SupportsSigning() error {
+ return nil
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ key, err := m.ctx.GetKey(keyIdentity, true)
+ if err != nil {
+ return nil, err
+ }
+ inputData, err := gpgme.NewDataBytes(input)
+ if err != nil {
+ return nil, err
+ }
+ var sigBuffer bytes.Buffer
+ sigData, err := gpgme.NewDataWriter(&sigBuffer)
+ if err != nil {
+ return nil, err
+ }
+ if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
+ return nil, err
+ }
+ return sigBuffer.Bytes(), nil
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ signedBuffer := bytes.Buffer{}
+ signedData, err := gpgme.NewDataWriter(&signedBuffer)
+ if err != nil {
+ return nil, "", err
+ }
+ unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
+ if err != nil {
+ return nil, "", err
+ }
+ _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
+ if err != nil {
+ return nil, "", err
+ }
+ if len(sigs) != 1 {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+ }
+ sig := sigs[0]
+ // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
+ if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
+ // FIXME: Better error reporting eventually
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+ }
+ return signedBuffer.Bytes(), sig.Fingerprint, nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/signature/mechanism_openpgp.go
new file mode 100644
index 000000000..eccd610c9
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism_openpgp.go
@@ -0,0 +1,159 @@
+// +build containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/storage/pkg/homedir"
+ "golang.org/x/crypto/openpgp"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
+type openpgpSigningMechanism struct {
+ keyring openpgp.EntityList
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+
+ gpgHome := optionalDir
+ if gpgHome == "" {
+ gpgHome = os.Getenv("GNUPGHOME")
+ if gpgHome == "" {
+ gpgHome = path.Join(homedir.Get(), ".gnupg")
+ }
+ }
+
+ pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ } else {
+ _, err := m.importKeysFromBytes(pubring)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+ keyIdentities, err := m.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ return m, keyIdentities, nil
+}
+
+func (m *openpgpSigningMechanism) Close() error {
+ return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+ keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
+ if err != nil {
+ k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
+ if e2 != nil {
+ return nil, err // The original error -- FIXME: is this better?
+ }
+ keyring = k
+ }
+
+ keyIdentities := []string{}
+ for _, entity := range keyring {
+ if entity.PrimaryKey == nil {
+ // Coverage: This should never happen, openpgp.ReadEntity fails with a
+ // openpgp.errors.StructuralError instead of returning an entity with this
+ // field set to nil.
+ continue
+ }
+ // Uppercase the fingerprint to be compatible with gpgme
+ keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)))
+ m.keyring = append(m.keyring, entity)
+ }
+ return keyIdentities, nil
+}
+
+// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+func (m *openpgpSigningMechanism) SupportsSigning() error {
+ return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("not signed")
+ }
+ content, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+ // (and possibly also signed, but it _must_ be encrypted) and the signing
+ // “modification detection code” detects a mismatch. But in that case,
+ // we would expect the signature verification to fail as well, and that is checked
+ // first. Besides, we are not supplying any decryption keys, so we really
+ // can never reach this “encrypted data MDC mismatch” path.
+ return nil, "", err
+ }
+ if md.SignatureError != nil {
+ return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+ }
+ if md.SignedBy == nil {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+ }
+ if md.Signature != nil {
+ if md.Signature.SigLifetimeSecs != nil {
+ expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+ if time.Now().After(expiry) {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+ }
+ }
+ } else if md.SignatureV3 == nil {
+ // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+ // or sets md.SignatureError.
+ return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+ }
+
+ // Uppercase the fingerprint to be compatible with gpgme
+ return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go
new file mode 100644
index 000000000..bc6c5e9a7
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_config.go
@@ -0,0 +1,684 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+// FIXME? This is by no means a user-friendly parser: no location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+ return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// ctx should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(ctx *types.SystemContext) (*Policy, error) {
+ return NewPolicyFromFile(defaultPolicyPath(ctx))
+}
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(ctx *types.SystemContext) string {
+ if ctx != nil {
+ if ctx.SignaturePolicyPath != "" {
+ return ctx.SignaturePolicyPath
+ }
+ if ctx.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+ }
+ }
+ return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+ contents, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return nil, err
+ }
+ return NewPolicyFromBytes(contents)
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+ p := Policy{}
+ if err := json.Unmarshal(data, &p); err != nil {
+ return nil, InvalidPolicyFormatError(err.Error())
+ }
+ return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (p *Policy) UnmarshalJSON(data []byte) error {
+ *p = Policy{}
+ transports := policyTransportsMap{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "default":
+ return &p.Default
+ case "transports":
+ return &transports
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if p.Default == nil {
+ return InvalidPolicyFormatError("Default policy is missing")
+ }
+ p.Transports = map[string]PolicyTransportScopes(transports)
+ return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers to the values and convert.
+ tmpMap := map[string]*PolicyTransportScopes{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // transport can be nil
+ transport := transports.Get(key)
+ // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ ptsWithTransport := policyTransportScopesWithTransport{
+ transport: transport,
+ dest: &PolicyTransportScopes{}, // This allocates a new instance on each call.
+ }
+ tmpMap[key] = ptsWithTransport.dest
+ return &ptsWithTransport
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
+// We want to use only policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+ return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+ transport types.ImageTransport
+ dest *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers-to-slices and convert.
+ tmpMap := map[string]*PolicyRequirements{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ if key != "" && m.transport != nil {
+ if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+ return nil
+ }
+ }
+ ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+ tmpMap[key] = ptr
+ return ptr
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m.dest)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyRequirements implements json.Unmarshaler.
+var _ json.Unmarshaler = (*PolicyRequirements)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyRequirements) UnmarshalJSON(data []byte) error {
+ reqJSONs := []json.RawMessage{}
+ if err := json.Unmarshal(data, &reqJSONs); err != nil {
+ return err
+ }
+ if len(reqJSONs) == 0 {
+ return InvalidPolicyFormatError("List of verification policy requirements must not be empty")
+ }
+ res := make([]PolicyRequirement, len(reqJSONs))
+ for i, reqJSON := range reqJSONs {
+ req, err := newPolicyRequirementFromJSON(reqJSON)
+ if err != nil {
+ return err
+ }
+ res[i] = req
+ }
+ *m = res
+ return nil
+}
+
+// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation.
+func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
+ var typeField prCommon
+ if err := json.Unmarshal(data, &typeField); err != nil {
+ return nil, err
+ }
+ var res PolicyRequirement
+ switch typeField.Type {
+ case prTypeInsecureAcceptAnything:
+ res = &prInsecureAcceptAnything{}
+ case prTypeReject:
+ res = &prReject{}
+ case prTypeSignedBy:
+ res = &prSignedBy{}
+ case prTypeSignedBaseLayer:
+ res = &prSignedBaseLayer{}
+ default:
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
+ }
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type.
+func newPRInsecureAcceptAnything() *prInsecureAcceptAnything {
+ return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}}
+}
+
+// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement.
+func NewPRInsecureAcceptAnything() PolicyRequirement {
+ return newPRInsecureAcceptAnything()
+}
+
+// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
+ *pr = prInsecureAcceptAnything{}
+ var tmp prInsecureAcceptAnything
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeInsecureAcceptAnything {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *pr = *newPRInsecureAcceptAnything()
+ return nil
+}
+
+// newPRReject is NewPRReject, except it returns the private type.
+func newPRReject() *prReject {
+ return &prReject{prCommon{Type: prTypeReject}}
+}
+
+// NewPRReject returns a new "reject" PolicyRequirement.
+func NewPRReject() PolicyRequirement {
+ return newPRReject()
+}
+
+// Compile-time check that prReject implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prReject)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prReject) UnmarshalJSON(data []byte) error {
+ *pr = prReject{}
+ var tmp prReject
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeReject {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *pr = *newPRReject()
+ return nil
+}
+
+// newPRSignedBy returns a new prSignedBy if parameters are valid.
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ if !keyType.IsValid() {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+ }
+ if len(keyPath) > 0 && len(keyData) > 0 {
+ return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
+ }
+ if signedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+ return &prSignedBy{
+ prCommon: prCommon{Type: prTypeSignedBy},
+ KeyType: keyType,
+ KeyPath: keyPath,
+ KeyData: keyData,
+ SignedIdentity: signedIdentity,
+ }, nil
+}
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBy{}
+ var tmp prSignedBy
+ var gotKeyPath, gotKeyData = false, false
+ var signedIdentity json.RawMessage
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyType":
+ return &tmp.KeyType
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBy {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var res *prSignedBy
+ var err error
+ switch {
+ case gotKeyPath && gotKeyData:
+ return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+ case gotKeyPath && !gotKeyData:
+ res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
+ case !gotKeyPath && gotKeyData:
+ res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyData:
+ return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified")
+ default: // Coverage: This should never happen
+ return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+ }
+ if err != nil {
+ return err
+ }
+ *pr = *res
+
+ return nil
+}
+
+// IsValid returns true iff kt is a recognized value
+func (kt sbKeyType) IsValid() bool {
+ switch kt {
+ case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
+ SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compile-time check that sbKeyType implements json.Unmarshaler.
+var _ json.Unmarshaler = (*sbKeyType)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
+ *kt = sbKeyType("")
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ if !sbKeyType(s).IsValid() {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+ }
+ *kt = sbKeyType(s)
+ return nil
+}
+
+// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
+ if baseLayerIdentity == nil {
+ return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
+ }
+ return &prSignedBaseLayer{
+ prCommon: prCommon{Type: prTypeSignedBaseLayer},
+ BaseLayerIdentity: baseLayerIdentity,
+ }, nil
+}
+
+// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
+func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedBaseLayer(baseLayerIdentity)
+}
+
+// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBaseLayer{}
+ var tmp prSignedBaseLayer
+ var baseLayerIdentity json.RawMessage
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "baseLayerIdentity": &baseLayerIdentity,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBaseLayer {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+ if err != nil {
+ return err
+ }
+ res, err := newPRSignedBaseLayer(bli)
+ if err != nil {
+ // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+ return err
+ }
+ *pr = *res
+ return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+ var typeField prmCommon
+ if err := json.Unmarshal(data, &typeField); err != nil {
+ return nil, err
+ }
+ var res PolicyReferenceMatch
+ switch typeField.Type {
+ case prmTypeMatchExact:
+ res = &prmMatchExact{}
+ case prmTypeMatchRepoDigestOrExact:
+ res = &prmMatchRepoDigestOrExact{}
+ case prmTypeMatchRepository:
+ res = &prmMatchRepository{}
+ case prmTypeExactReference:
+ res = &prmExactReference{}
+ case prmTypeExactRepository:
+ res = &prmExactRepository{}
+ default:
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+ }
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+ return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+ return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchExact{}
+ var tmp prmMatchExact
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy reference match type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchExact()
+ return nil
+}
+
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+ return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+ return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepoDigestOrExact{}
+ var tmp prmMatchRepoDigestOrExact
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepoDigestOrExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy reference match type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepoDigestOrExact()
+ return nil
+}
+
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+ return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+ return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepository{}
+ var tmp prmMatchRepository
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy reference match type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepository()
+ return nil
+}
+
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+ ref, err := reference.ParseNormalizedNamed(dockerReference)
+ if err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+ }
+ if reference.IsNameOnly(ref) {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+ }
+ return &prmExactReference{
+ prmCommon: prmCommon{Type: prmTypeExactReference},
+ DockerReference: dockerReference,
+ }, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+ return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+ *prm = prmExactReference{}
+ var tmp prmExactReference
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "dockerReference": &tmp.DockerReference,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactReference {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy reference match type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactReference(tmp.DockerReference)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+ if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+ }
+ return &prmExactRepository{
+ prmCommon: prmCommon{Type: prmTypeExactRepository},
+ DockerRepository: dockerRepository,
+ }, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+ return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmExactRepository{}
+ var tmp prmExactRepository
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "dockerRepository": &tmp.DockerRepository,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactRepository(tmp.DockerRepository)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
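
Editor's note: the unmarshalers above all follow one strict pattern: decode into a throwaway value with paranoidUnmarshalJSONObjectExactFields, check the "type" discriminator, then rebuild the result through the corresponding constructor so invalid states cannot be smuggled in. A minimal package-internal sketch of the resulting behavior (the test name and inputs are hypothetical):

package signature

import (
	"encoding/json"
	"testing"
)

// Hypothetical test: the strict unmarshalers accept exactly the
// declared fields and reject anything else.
func TestPRMExactRepositoryStrictJSON(t *testing.T) {
	var prm prmExactRepository
	good := []byte(`{"type":"exactRepository","dockerRepository":"docker.io/library/busybox"}`)
	if err := json.Unmarshal(good, &prm); err != nil {
		t.Fatalf("valid input rejected: %v", err)
	}
	// An unknown field makes paranoidUnmarshalJSONObjectExactFields fail.
	bad := []byte(`{"type":"exactRepository","dockerRepository":"busybox","extra":1}`)
	if err := json.Unmarshal(bad, &prm); err == nil {
		t.Fatal("input with an unknown field was unexpectedly accepted")
	}
}
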
diff --git a/vendor/github.com/containers/image/signature/policy_eval.go b/vendor/github.com/containers/image/signature/policy_eval.go
new file mode 100644
index 000000000..f818eb095
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval.go
@@ -0,0 +1,289 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+ "context"
+
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+ return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+ sarAccepted signatureAcceptanceResult = "sarAccepted"
+ sarRejected signatureAcceptanceResult = "sarRejected"
+ sarUnknown signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+ // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+ // costly initialization like creating temporary GPG home directories and reading files.
+ // Setup() (someState, error)
+ // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+ // isSignatureAuthorAccepted, given an image and a signature blob, returns:
+ // - sarAccepted if the signature has been verified against the appropriate public key
+ // (where "appropriate public key" may depend on the contents of the signature);
+ // in that case a parsed Signature should be returned.
+ // - sarRejected if the signature has not been verified;
+	// in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+ // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+ // Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+ // but it does not necessarily mean that the contents of the signature are
+ // consistent with local policy.
+ // For example:
+ // - Do not use a true value to determine whether to run
+ // a container based on this image; use IsRunningImageAllowed instead.
+ // - Just because a signature is accepted does not automatically mean the contents of the
+ // signature are authorized to run code as root, or to affect system or cluster configuration.
+ isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+ // isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the
+ // layers. Users must validate that the layers match their expected digests.
+ isRunningImageAllowed(image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+ // matchesDockerReference decides whether a specific image identity is accepted for an image
+ // (or, usually, for the image's Reference().DockerReference()). Note that
+ // image.Reference().DockerReference() may be nil.
+ matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+ Policy *Policy
+ state policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify that users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+ pcInvalid policyContextState = ""
+ pcInitializing policyContextState = "Initializing"
+ pcReady policyContextState = "Ready"
+ pcInUse policyContextState = "InUse"
+ pcDestroying policyContextState = "Destroying"
+ pcDestroyed policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state to new, or fails if the current state is not the expected one
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+ if pc.state != expected {
+		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ }
+ pc.state = new
+ return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+ pc := &PolicyContext{Policy: policy, state: pcInitializing}
+ // FIXME: initialize
+ if err := pc.changeState(pcInitializing, pcReady); err != nil {
+ // Huh?! This should never fail, we didn't give the pointer to anybody.
+ // Just give up and leave unclean state around.
+ return nil, err
+ }
+ return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+ if err := pc.changeState(pcReady, pcDestroying); err != nil {
+ return err
+ }
+ // FIXME: destroy
+ return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+ // Do we have a PolicyTransportScopes for this transport?
+ transportName := ref.Transport().Name()
+ if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if req, ok := transportScopes[identity]; ok {
+ logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+ return req
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if req, ok := transportScopes[name]; ok {
+ logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+ return req
+ }
+ }
+
+ // Look for a default match for the transport.
+ if req, ok := transportScopes[""]; ok {
+ logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+ return req
+ }
+ }
+
+ logrus.Debugf(" Using default policy section")
+ return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ sigs = nil
+ finalErr = err
+ }
+ }()
+
+ logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ // FIXME: rename Signatures to UnverifiedSignatures
+ // FIXME: pass context.Context
+ unverifiedSignatures, err := image.Signatures(context.TODO())
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]*Signature, 0, len(unverifiedSignatures))
+ for sigNumber, sig := range unverifiedSignatures {
+ var acceptedSig *Signature // non-nil if accepted
+ rejected := false
+ // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+ logrus.Debugf("Evaluating signature %d:", sigNumber)
+ interpretingReqs:
+ for reqNumber, req := range reqs {
+ // FIXME: Log the requirement itself? For now, we use just the number.
+ // FIXME: supply state
+ switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res {
+ case sarAccepted:
+ if as == nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+ if acceptedSig == nil {
+ acceptedSig = as
+ } else if *as != *acceptedSig { // Coverage: this should never happen
+ // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
+ rejected = true
+ acceptedSig = nil
+ break interpretingReqs
+ }
+ case sarRejected:
+ logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ case sarUnknown:
+ if err != nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
+ default: // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
+ rejected = true
+ break interpretingReqs
+ }
+ }
+ // This also handles the (invalid) case of empty reqs, by rejecting the signature.
+ if acceptedSig != nil && !rejected {
+ logrus.Debugf(" Overall: OK, signature accepted")
+ res = append(res, acceptedSig)
+ } else {
+ logrus.Debugf(" Overall: Signature not accepted")
+ }
+ }
+ return res, nil
+}
+
+// IsRunningImageAllowed returns true iff the policy allows running the image.
+// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+// succeeded but the result was rejection.
+// WARNING: This validates signatures and the manifest, but does not download or validate the
+// layers. Users must validate that the layers match their expected digests.
+func (pc *PolicyContext) IsRunningImageAllowed(image types.UnparsedImage) (res bool, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return false, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ res = false
+ finalErr = err
+ }
+ }()
+
+ logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ if len(reqs) == 0 {
+ return false, PolicyRequirementError("List of verification policy requirements must not be empty")
+ }
+
+ for reqNumber, req := range reqs {
+ // FIXME: supply state
+ allowed, err := req.isRunningImageAllowed(image)
+ if !allowed {
+ logrus.Debugf("Requirement %d: denied, done", reqNumber)
+ return false, err
+ }
+ logrus.Debugf(" Requirement %d: allowed", reqNumber)
+ }
+ // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
+ logrus.Debugf("Overall: allowed")
+ return true, nil
+}
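
The pcReady/pcInUse/pcDestroyed state machine above dictates a fixed calling sequence for users of this API. A minimal sketch of a caller, assuming `policy` comes from the constructors in policy_config.go and `img` is a types.UnparsedImage obtained from some transport (both hypothetical here):

package policycheck

import (
	"github.com/containers/image/signature"
	"github.com/containers/image/types"
)

// checkImage is a hypothetical caller: `policy` would come from
// NewPolicyFromFile/NewPolicyFromBytes, and `img` from a transport's
// image reference. A PolicyRequirementError from the call signals a
// policy rejection, as opposed to an evaluation failure.
func checkImage(policy *signature.Policy, img types.UnparsedImage) (bool, error) {
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	defer pc.Destroy() // pcReady -> pcDestroying -> pcDestroyed

	// pcReady -> pcInUse for the duration of the call, then back to pcReady.
	return pc.IsRunningImageAllowed(img)
}
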
diff --git a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go
new file mode 100644
index 000000000..898958012
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go
@@ -0,0 +1,18 @@
+// Policy evaluation for prSignedBaseLayer.
+
+package signature
+
+import (
+ "github.com/containers/image/types"
+ "github.com/sirupsen/logrus"
+)
+
+func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ return sarUnknown, nil, nil
+}
+
+func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+ // FIXME? Reject this at policy parsing time already?
+ logrus.Errorf("signedBaseLayer not implemented yet!")
+ return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
+}
diff --git a/vendor/github.com/containers/image/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/signature/policy_eval_signedby.go
new file mode 100644
index 000000000..56665124c
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval_signedby.go
@@ -0,0 +1,131 @@
+// Policy evaluation for prSignedBy.
+
+package signature
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ switch pr.KeyType {
+ case SBKeyTypeGPGKeys:
+ case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+ // FIXME? Reject this at policy parsing time already?
+		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+ default:
+ // This should never happen, newPRSignedBy ensures KeyType.IsValid()
+		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
+ }
+
+ if pr.KeyPath != "" && pr.KeyData != nil {
+ return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
+ }
+ // FIXME: move this to per-context initialization
+ var data []byte
+ if pr.KeyData != nil {
+ data = pr.KeyData
+ } else {
+ d, err := ioutil.ReadFile(pr.KeyPath)
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ data = d
+ }
+
+ // FIXME: move this to per-context initialization
+ mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ defer mech.Close()
+ if len(trustedIdentities) == 0 {
+ return sarRejected, nil, PolicyRequirementError("No public keys imported")
+ }
+
+ signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
+ validateKeyIdentity: func(keyIdentity string) error {
+ for _, trustedIdentity := range trustedIdentities {
+ if keyIdentity == trustedIdentity {
+ return nil
+ }
+ }
+ // Coverage: We use a private GPG home directory and only import trusted keys, so this should
+ // not be reachable.
+ return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
+ },
+ validateSignedDockerReference: func(ref string) error {
+ if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+ return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+ }
+ return nil
+ },
+ validateSignedDockerManifestDigest: func(digest digest.Digest) error {
+ m, _, err := image.Manifest()
+ if err != nil {
+ return err
+ }
+ digestMatches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return err
+ }
+ if !digestMatches {
+ return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return sarRejected, nil, err
+ }
+
+ return sarAccepted, signature, nil
+}
+
+func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+ // FIXME: pass context.Context
+ sigs, err := image.Signatures(context.TODO())
+ if err != nil {
+ return false, err
+ }
+ var rejections []error
+ for _, s := range sigs {
+ var reason error
+ switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res {
+ case sarAccepted:
+ // One accepted signature is enough.
+ return true, nil
+ case sarRejected:
+ reason = err
+ case sarUnknown:
+ // Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough
+ default:
+ reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ }
+ rejections = append(rejections, reason)
+ }
+ var summary error
+ switch len(rejections) {
+ case 0:
+ summary = PolicyRequirementError("A signature was required, but no signature exists")
+ case 1:
+ summary = rejections[0]
+ default:
+ var msgs []string
+ for _, e := range rejections {
+ msgs = append(msgs, e.Error())
+ }
+ summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
+ strings.Join(msgs, "; ")))
+ }
+ return false, summary
+}
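
For context, a policy exercising this requirement carries a "signedBy" entry in its default list. A minimal sketch, assuming NewPolicyFromBytes from policy_config.go; the key path is hypothetical:

package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

// Parse a policy whose default requirement is the "signedBy" type
// evaluated above.
func main() {
	policyJSON := []byte(`{
	    "default": [
	        {
	            "type": "signedBy",
	            "keyType": "GPGKeys",
	            "keyPath": "/etc/pki/containers/example-pubkey.gpg",
	            "signedIdentity": {"type": "matchRepoDigestOrExact"}
	        }
	    ]
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		fmt.Println("policy rejected:", err)
		return
	}
	fmt.Printf("parsed %d default requirement(s)\n", len(policy.Default))
}
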
diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple.go b/vendor/github.com/containers/image/signature/policy_eval_simple.go
new file mode 100644
index 000000000..19a71e6d9
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval_simple.go
@@ -0,0 +1,28 @@
+// Policy evaluation for the various simple PolicyRequirement types.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+)
+
+func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ // prInsecureAcceptAnything semantics: Every image is allowed to run,
+ // but this does not consider the signature as verified.
+ return sarUnknown, nil, nil
+}
+
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+ return true, nil
+}
+
+func (pr *prReject) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
+}
+
+func (pr *prReject) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+ return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
+}
diff --git a/vendor/github.com/containers/image/signature/policy_reference_match.go b/vendor/github.com/containers/image/signature/policy_reference_match.go
new file mode 100644
index 000000000..a8dad6770
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_reference_match.go
@@ -0,0 +1,101 @@
+// PolicyReferenceMatch implementations.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+)
+
+// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+ r1 := image.Reference().DockerReference()
+ if r1 == nil {
+ return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+ transports.ImageName(image.Reference())))
+ }
+ r2, err := reference.ParseNormalizedNamed(s2)
+ if err != nil {
+ return nil, nil, err
+ }
+ return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+ return false
+ }
+ return signature.String() == intended.String()
+}
+
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+
+ // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(signature) {
+ return false
+ }
+ switch intended.(type) {
+ case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+ return signature.String() == intended.String()
+ case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, via UnparsedImage.Manifest.
+		// Because UnparsedImage.Manifest verifies intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+		// we know that the signature digest matches intended.Digest() (though intended.Digest() and the signature digest may use different algorithms).
+ return signature.Name() == intended.Name()
+ default: // !reference.IsNameOnly(intended)
+ return false
+ }
+}
+
+func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+ r1, err := reference.ParseNormalizedNamed(s1)
+ if err != nil {
+ return nil, nil, err
+ }
+ r2, err := reference.ParseNormalizedNamed(s2)
+ if err != nil {
+ return nil, nil, err
+ }
+ return r1, r2, nil
+}
+
+func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ // prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+ return false
+ }
+ return signature.String() == intended.String()
+}
+
+func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ return signature.Name() == intended.Name()
+}
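
All of these matchers lean on reference normalization: ParseNormalizedNamed expands short names to their canonical docker.io form, and IsNameOnly flags references that carry neither tag nor digest. A small sketch of that behavior:

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

// Show the normalization the matchers above rely on.
func main() {
	ref, err := reference.ParseNormalizedNamed("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())              // "docker.io/library/busybox"
	fmt.Println(reference.IsNameOnly(ref)) // true: prmMatchExact would reject this

	tagged, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.IsNameOnly(tagged)) // false
}
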
diff --git a/vendor/github.com/containers/image/signature/policy_types.go b/vendor/github.com/containers/image/signature/policy_types.go
new file mode 100644
index 000000000..4cd770f11
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_types.go
@@ -0,0 +1,152 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/policy.json.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+ // Default applies to any image which does not have a matching policy in Transports.
+ // Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+ // if the image matches none of the scopes.
+ Default PolicyRequirements `json:"default"`
+ Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// The most specific scope wins; duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+ Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+ prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+ prTypeReject prTypeIdentifier = "reject"
+ prTypeSignedBy prTypeIdentifier = "signedBy"
+ prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+ prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+ prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+ prCommon
+
+ // KeyType specifies what kind of key reference KeyPath/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X509Certificates” | “signedByX509CAs”
+ // FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+ KeyType sbKeyType `json:"keyType"`
+
+ // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+ KeyPath string `json:"keyPath,omitempty"`
+ // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+ KeyData []byte `json:"keyData,omitempty"`
+
+ // SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+ SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+ // SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+ SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+ // SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+ SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+ // SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+ // FIXME: PEM, DER?
+ SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+ // SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+ // FIXME: PEM, DER?
+ SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prTypeSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+ prCommon
+	// BaseLayerIdentity specifies the base image to look for. "matchExact" is rejected, "matchRepository" is unlikely to be useful.
+ BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+ Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+ prmTypeMatchExact prmTypeIdentifier = "matchExact"
+ prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+ prmTypeMatchRepository prmTypeIdentifier = "matchRepository"
+ prmTypeExactReference prmTypeIdentifier = "exactReference"
+ prmTypeExactRepository prmTypeIdentifier = "exactRepository"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+ prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest.
+type prmMatchRepoDigestOrExact struct {
+ prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+ prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+ prmCommon
+ DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+ prmCommon
+ DockerRepository string `json:"dockerRepository"`
+}
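
Read together, these types describe a policy as a default requirement list plus per-transport, per-scope overrides. A minimal sketch of building one in memory, assuming the public constructors defined in policy_config.go (the registry name is hypothetical):

package policyexample

import "github.com/containers/image/signature"

// examplePolicy rejects everything by default, but accepts anything
// from one trusted namespace scope of the "docker" transport.
func examplePolicy() *signature.Policy {
	return &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				"registry.example.com/trusted": signature.PolicyRequirements{
					signature.NewPRInsecureAcceptAnything(),
				},
			},
		},
	}
}
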
diff --git a/vendor/github.com/containers/image/signature/signature.go b/vendor/github.com/containers/image/signature/signature.go
new file mode 100644
index 000000000..41f13f72f
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/signature.go
@@ -0,0 +1,280 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/version"
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError struct {
+ msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+ return err.msg
+}
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+ DockerManifestDigest digest.Digest
+ DockerReference string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+type untrustedSignature struct {
+ UntrustedDockerManifestDigest digest.Digest
+ UntrustedDockerReference string // FIXME: more precise type?
+ UntrustedCreatorID *string
+	// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
+ // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+ // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+ // we would add another field, UntrustedTimestampNS int64.
+ UntrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+type UntrustedSignatureInformation struct {
+ UntrustedDockerManifestDigest digest.Digest
+ UntrustedDockerReference string // FIXME: more precise type?
+ UntrustedCreatorID *string
+ UntrustedTimestamp *time.Time
+ UntrustedShortKeyIdentifier string
+}
+
+// newUntrustedSignature returns an untrustedSignature object with
+// the specified primary contents and appropriate metadata.
+func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
+ // Use intermediate variables for these values so that we can take their addresses.
+ // Golang guarantees that they will have a new address on every execution.
+ creatorID := "atomic " + version.Version
+ timestamp := time.Now().Unix()
+ return untrustedSignature{
+ UntrustedDockerManifestDigest: dockerManifestDigest,
+ UntrustedDockerReference: dockerReference,
+ UntrustedCreatorID: &creatorID,
+ UntrustedTimestamp: &timestamp,
+ }
+}
+
+// Compile-time check that untrustedSignature implements json.Marshaler
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+ if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]interface{}{
+ "type": signatureType,
+ "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
+ }
+ optional := map[string]interface{}{}
+ if s.UntrustedCreatorID != nil {
+ optional["creator"] = *s.UntrustedCreatorID
+ }
+ if s.UntrustedTimestamp != nil {
+ optional["timestamp"] = *s.UntrustedTimestamp
+ }
+ signature := map[string]interface{}{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+ err := s.strictUnmarshalJSON(data)
+ if err != nil {
+ if _, ok := err.(jsonFormatError); ok {
+ err = InvalidSignatureError{msg: err.Error()}
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
+// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+ var critical, optional json.RawMessage
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "critical": &critical,
+ "optional": &optional,
+ }); err != nil {
+ return err
+ }
+
+ var creatorID string
+ var timestamp float64
+ var gotCreatorID, gotTimestamp = false, false
+ if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+ switch key {
+ case "creator":
+ gotCreatorID = true
+ return &creatorID
+ case "timestamp":
+ gotTimestamp = true
+ return &timestamp
+ default:
+ var ignore interface{}
+ return &ignore
+ }
+ }); err != nil {
+ return err
+ }
+ if gotCreatorID {
+ s.UntrustedCreatorID = &creatorID
+ }
+ if gotTimestamp {
+ intTimestamp := int64(timestamp)
+ if float64(intTimestamp) != timestamp {
+			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
+ }
+ s.UntrustedTimestamp = &intTimestamp
+ }
+
+ var t string
+ var image, identity json.RawMessage
+ if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+ "type": &t,
+ "image": &image,
+ "identity": &identity,
+ }); err != nil {
+ return err
+ }
+ if t != signatureType {
+ return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+ }
+
+ var digestString string
+ if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+ "docker-manifest-digest": &digestString,
+ }); err != nil {
+ return err
+ }
+ s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+ return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+ "docker-reference": &s.UntrustedDockerReference,
+ })
+}
+
+// Sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ json, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+
+ return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type signatureAcceptanceRules struct {
+ validateKeyIdentity func(string) error
+ validateSignedDockerReference func(string) error
+ validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+ signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+ if err != nil {
+ return nil, err
+ }
+ if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+ return nil, err
+ }
+
+ var unmatchedSignature untrustedSignature
+ if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+ return nil, InvalidSignatureError{msg: err.Error()}
+ }
+ if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
+ return nil, err
+ }
+ // signatureAcceptanceRules have accepted this value.
+ return &Signature{
+ DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
+ DockerReference: unmatchedSignature.UntrustedDockerReference,
+ }, nil
+}
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+	// NOTE: This should eventually do format autodetection.
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ defer mech.Close()
+
+ untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+ if err != nil {
+ return nil, err
+ }
+ var untrustedDecodedContents untrustedSignature
+ if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+ return nil, InvalidSignatureError{msg: err.Error()}
+ }
+
+ var timestamp *time.Time // = nil
+ if untrustedDecodedContents.UntrustedTimestamp != nil {
+ ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
+ timestamp = &ts
+ }
+ return &UntrustedSignatureInformation{
+ UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
+ UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
+ UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
+ UntrustedTimestamp: timestamp,
+ UntrustedShortKeyIdentifier: shortKeyIdentifier,
+ }, nil
+}
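
A short debugging sketch using the function above; the signature path is hypothetical, and per the WARNING none of the printed values are trustworthy without cryptographic verification:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/signature"
)

// Dump the unverified contents of a signature blob for debugging.
func main() {
	blob, err := ioutil.ReadFile("/tmp/image.signature")
	if err != nil {
		panic(err)
	}
	info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(blob)
	if err != nil {
		panic(err)
	}
	fmt.Println("digest:   ", info.UntrustedDockerManifestDigest)
	fmt.Println("reference:", info.UntrustedDockerReference)
	if info.UntrustedCreatorID != nil {
		fmt.Println("creator:  ", *info.UntrustedCreatorID)
	}
	if info.UntrustedTimestamp != nil {
		fmt.Println("timestamp:", *info.UntrustedTimestamp)
	}
	fmt.Println("short key:", info.UntrustedShortKeyIdentifier)
}
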
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
new file mode 100644
index 000000000..185e5f492
--- /dev/null
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -0,0 +1,878 @@
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync/atomic"
+
+ "github.com/containers/image/image"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/docker/docker/api/types/versions"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
+var (
+ // ErrBlobDigestMismatch is returned when PutBlob() is given a blob
+ // with a digest-based name that doesn't match its contents.
+ ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+ // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+ // with an expected size that doesn't match the reader.
+ ErrBlobSizeMismatch = errors.New("blob size mismatch")
+ // ErrNoManifestLists is returned when GetTargetManifest() is called.
+ ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
+ // ErrNoSuchImage is returned when we attempt to access an image which
+ // doesn't exist in the storage area.
+ ErrNoSuchImage = storage.ErrNotAnImage
+)
+
+type storageImageSource struct {
+ imageRef storageReference
+ ID string
+ layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
+ cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+type storageImageDestination struct {
+ imageRef storageReference // The reference we'll use to name the image
+ publicRef storageReference // The reference we return when asked about the name we'll give to the image
+ directory string // Temporary directory where we store blobs until Commit() time
+ nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
+ manifest []byte // Manifest contents, temporary
+ signatures []byte // Signature contents, temporary
+ blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+ fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
+ filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+type storageImage struct {
+ types.Image
+ size int64
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(imageRef storageReference) (*storageImageSource, error) {
+ // First, locate the image.
+ img, err := imageRef.resolveImage()
+ if err != nil {
+ return nil, err
+ }
+
+ // Build the reader object.
+ image := &storageImageSource{
+ imageRef: imageRef,
+ ID: img.ID,
+ layerPosition: make(map[digest.Digest]int),
+ SignatureSizes: []int{},
+ }
+ if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+ return nil, errors.Wrap(err, "error decoding metadata for source image")
+ }
+ return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s storageImageSource) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s storageImageSource) Close() error {
+ return nil
+}
+
+// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+ rc, n, _, err = s.getBlobAndLayerID(info)
+ return rc, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+ var layer storage.Layer
+ var diffOptions *storage.DiffOptions
+ // We need a valid digest value.
+ err = info.Digest.Validate()
+ if err != nil {
+ return nil, -1, "", err
+ }
+ // Check if the blob corresponds to a diff that was used to initialize any layers. Our
+ // callers should try to retrieve layers using their uncompressed digests, so no need to
+ // check if they're using one of the compressed digests, which we can't reproduce anyway.
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+ // If it's not a layer, then it must be a data item.
+ if len(layers) == 0 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String())
+ if err != nil {
+ return nil, -1, "", err
+ }
+ r := bytes.NewReader(b)
+ logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+ return ioutil.NopCloser(r), int64(r.Len()), "", nil
+ }
+	// Step through the list of matching layers. Tests may want to verify that, if we have multiple layers
+	// which claim to have the same contents, we actually do have multiple layers; otherwise we could
+	// just go ahead and use the first one every time.
+ i := s.layerPosition[info.Digest]
+ s.layerPosition[info.Digest] = i + 1
+ if len(layers) > 0 {
+ layer = layers[i%len(layers)]
+ }
+ // Force the storage layer to not try to match any compression that was used when the layer was first
+ // handed to it.
+ noCompression := archive.Uncompressed
+ diffOptions = &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ if layer.UncompressedSize < 0 {
+ n = -1
+ } else {
+ n = layer.UncompressedSize
+ }
+ logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+ rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+ if err != nil {
+ return nil, -1, "", err
+ }
+ return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
+ if len(s.cachedManifest) == 0 {
+ cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "manifest")
+ if err != nil {
+ return nil, "", err
+ }
+ s.cachedManifest = cachedBlob
+ }
+ return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+}
+
+// UpdatedLayerInfos() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
+func (s *storageImageSource) UpdatedLayerInfos() []types.BlobInfo {
+ simg, err := s.imageRef.transport.store.Image(s.ID)
+ if err != nil {
+ logrus.Errorf("error reading image %q: %v", s.ID, err)
+ return nil
+ }
+ updatedBlobInfos := []types.BlobInfo{}
+ layerID := simg.TopLayer
+ _, manifestType, err := s.GetManifest()
+ if err != nil {
+ logrus.Errorf("error reading image manifest for %q: %v", s.ID, err)
+ return nil
+ }
+ uncompressedLayerType := ""
+ switch manifestType {
+ case imgspecv1.MediaTypeImageManifest:
+ uncompressedLayerType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ // This is actually a compressed type, but there's no uncompressed type defined
+ uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType
+ }
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err)
+ return nil
+ }
+ if layer.UncompressedDigest == "" {
+ logrus.Errorf("uncompressed digest for layer %q is unknown", layerID)
+ return nil
+ }
+ if layer.UncompressedSize < 0 {
+ logrus.Errorf("uncompressed size for layer %q is unknown", layerID)
+ return nil
+ }
+ blobInfo := types.BlobInfo{
+ Digest: layer.UncompressedDigest,
+ Size: layer.UncompressedSize,
+ MediaType: uncompressedLayerType,
+ }
+ updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...)
+ layerID = layer.Parent
+ }
+ return updatedBlobInfos
+}
+
+// GetTargetManifest() is not supported.
+func (s *storageImageSource) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+ return nil, "", ErrNoManifestLists
+}
+
+// GetSignatures() parses the image's signatures blob into a slice of byte slices.
+func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) {
+ var offset int
+ sigslice := [][]byte{}
+ signature := []byte{}
+ if len(s.SignatureSizes) > 0 {
+ signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID)
+ }
+ signature = signatureBlob
+ }
+ for _, length := range s.SignatureSizes {
+ sigslice = append(sigslice, signature[offset:offset+length])
+ offset += length
+ }
+ if offset != len(signature) {
+		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+ }
+ return sigslice, nil
+}
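
GetSignatures assumes the inverse layout was produced at write time: individual signature blobs concatenated into a single "signatures" big-data item, with the per-signature lengths recorded in SignatureSizes. A hypothetical helper showing that layout:

// concatSignatures is a sketch of the write-side layout: signatures are
// stored back-to-back, and the per-signature lengths are what
// GetSignatures later reads out of SignatureSizes.
func concatSignatures(sigs [][]byte) (blob []byte, sizes []int) {
	for _, sig := range sigs {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}
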
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
+ directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating a temporary directory")
+ }
+ // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite
+ // schema1 image manifests to remove embedded references, since that changes the manifest's
+ // digest, and that makes the image unusable if we subsequently try to access it using a
+ // reference that mentions the no-longer-correct digest.
+ publicRef := imageRef
+ publicRef.name = nil
+ image := &storageImageDestination{
+ imageRef: imageRef,
+ publicRef: publicRef,
+ directory: directory,
+ blobDiffIDs: make(map[digest.Digest]digest.Digest),
+ fileSizes: make(map[digest.Digest]int64),
+ filenames: make(map[digest.Digest]string),
+ SignatureSizes: []int{},
+ }
+ return image, nil
+}
+
+// Reference returns a mostly-usable image reference that can't return a DockerReference, to
+// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to
+// remove image names that they contain which don't match the value we're using.
+func (s storageImageDestination) Reference() types.ImageReference {
+ return s.publicRef
+}
+
+// Close cleans up the temporary directory.
+func (s *storageImageDestination) Close() error {
+ return os.RemoveAll(s.directory)
+}
+
+// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed
+// data when handing it to us.
+func (s storageImageDestination) ShouldCompressLayers() bool {
+ // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't
+ // bother compressing them before handing them to us, if they're not already compressed.
+ return false
+}
+
+// PutBlob stores a layer or data blob in our temporary directory, checking that any information
+// in the blobinfo matches the incoming data.
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
+ errorBlobInfo := types.BlobInfo{
+ Digest: "",
+ Size: -1,
+ }
+ // Set up to digest the blob and count its size while saving it to a file.
+ hasher := digest.Canonical.Digester()
+ if blobinfo.Digest.Validate() == nil {
+ if a := blobinfo.Digest.Algorithm(); a.Available() {
+ hasher = a.Digester()
+ }
+ }
+ diffID := digest.Canonical.Digester()
+ filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
+ }
+ defer file.Close()
+ counter := ioutils.NewWriteCounter(hasher.Hash())
+ reader := io.TeeReader(io.TeeReader(stream, counter), file)
+ decompressed, err := archive.DecompressStream(reader)
+ if err != nil {
+ return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
+ }
+ // Copy the data to the file.
+ _, err = io.Copy(diffID.Hash(), decompressed)
+ decompressed.Close()
+ if err != nil {
+ return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
+ }
+ // Ensure that any information that we were given about the blob is correct.
+ if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
+ return errorBlobInfo, ErrBlobDigestMismatch
+ }
+ if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
+ return errorBlobInfo, ErrBlobSizeMismatch
+ }
+ // Record information about the blob.
+ s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
+ s.fileSizes[hasher.Digest()] = counter.Count
+ s.filenames[hasher.Digest()] = filename
+ blobDigest := blobinfo.Digest
+ if blobDigest.Validate() != nil {
+ blobDigest = hasher.Digest()
+ }
+ blobSize := blobinfo.Size
+ if blobSize < 0 {
+ blobSize = counter.Count
+ }
+ return types.BlobInfo{
+ Digest: blobDigest,
+ Size: blobSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
+// reapplied using ReapplyBlob.
+//
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+ if blobinfo.Digest == "" {
+ return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
+ }
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+ }
+ // Check if we've already cached it in a file.
+ if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+ return true, size, nil
+ }
+	// Check if we have a layer in storage whose uncompressed contents match that blob.
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+ }
+ if len(layers) > 0 {
+ // Save this for completeness.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, layers[0].UncompressedSize, nil
+ }
+	// Check if we have a layer in storage whose compressed contents match that blob.
+ layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+ }
+ if len(layers) > 0 {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, layers[0].CompressedSize, nil
+ }
+ // Nope, we don't have it.
+ return false, -1, nil
+}
+
+// ReapplyBlob is a no-op: if HasBlob() says we already have the blob, Commit() can just apply the
+// same one when it walks the layer list in the manifest.
+func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
+	present, size, err := s.HasBlob(blobinfo)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
+	}
+	if !present {
+		return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
+	}
+ blobinfo.Size = size
+ return blobinfo, nil
+}
+
+// computeID computes a recommended image ID based on information we have so far.
+func (s *storageImageDestination) computeID(m manifest.Manifest) string {
+ mb, err := m.Serialize()
+ if err != nil {
+ return ""
+ }
+ switch manifest.GuessMIMEType(mb) {
+ case manifest.DockerV2Schema2MediaType, imgspecv1.MediaTypeImageManifest:
+ // For Schema2 and OCI1(?), the ID is just the hex part of the digest of the config blob.
+		logrus.Debugf("trivial image ID for config blob")
+ configInfo := m.ConfigInfo()
+ if configInfo.Digest.Validate() == nil {
+ return configInfo.Digest.Hex()
+ }
+ return ""
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+ // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields
+ // that aren't directly comparable using info from the manifest.
+ logrus.Debugf("computing image ID using compat data")
+ s1, ok := m.(*manifest.Schema1)
+ if !ok {
+ logrus.Debugf("schema type was guessed wrong?")
+ return ""
+ }
+ if len(s1.History) == 0 {
+ logrus.Debugf("image has no layers")
+ return ""
+ }
+ s2 := struct {
+ manifest.Schema2Image
+ ID string `json:"id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ ParentID string `json:"parent_id,omitempty"`
+ LayerID string `json:"layer_id,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+ Size int64 `json:",omitempty"`
+ }{}
+ config := []byte(s1.History[0].V1Compatibility)
+ if json.Unmarshal(config, &s2) != nil {
+ logrus.Debugf("error decoding configuration")
+ return ""
+ }
+ // Images created with versions prior to 1.8.3 require us to rebuild the object.
+ if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") {
+ err = json.Unmarshal(config, &s2)
+ if err != nil {
+ logrus.Infof("error decoding compat image config %s: %v", string(config), err)
+ return ""
+ }
+ config, err = json.Marshal(&s2)
+ if err != nil {
+ logrus.Infof("error re-encoding compat image config %#v: %v", s2, err)
+ return ""
+ }
+ }
+ // Build the history.
+ for _, h := range s1.History {
+ compat := manifest.Schema1V1Compatibility{}
+ if json.Unmarshal([]byte(h.V1Compatibility), &compat) != nil {
+ logrus.Debugf("error decoding history information")
+ return ""
+ }
+ hitem := manifest.Schema2History{
+ Created: compat.Created,
+ CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
+ Comment: compat.Comment,
+ EmptyLayer: compat.ThrowAway,
+ }
+ s2.History = append([]manifest.Schema2History{hitem}, s2.History...)
+ }
+ // Build the rootfs information. We need the decompressed sums that we've been
+ // calculating to fill in the DiffIDs.
+ s2.RootFS = &manifest.Schema2RootFS{
+ Type: "layers",
+ }
+ for _, fslayer := range s1.FSLayers {
+ blobSum := fslayer.BlobSum
+ diffID, ok := s.blobDiffIDs[blobSum]
+ if !ok {
+ logrus.Infof("error looking up diffID for blob %q", string(blobSum))
+ return ""
+ }
+ s2.RootFS.DiffIDs = append([]digest.Digest{diffID}, s2.RootFS.DiffIDs...)
+ }
+ // And now for some raw manipulation.
+ raw := make(map[string]*json.RawMessage)
+ err = json.Unmarshal(config, &raw)
+ if err != nil {
+ logrus.Infof("error re-decoding compat image config %#v: %v", s2, err)
+ return ""
+ }
+ // Drop some fields.
+ delete(raw, "id")
+ delete(raw, "parent")
+ delete(raw, "parent_id")
+ delete(raw, "layer_id")
+ delete(raw, "throwaway")
+ delete(raw, "Size")
+ // Add the history and rootfs information.
+ rootfs, err := json.Marshal(s2.RootFS)
+ if err != nil {
+ logrus.Infof("error encoding rootfs information %#v: %v", s2.RootFS, err)
+ return ""
+ }
+ rawRootfs := json.RawMessage(rootfs)
+ raw["rootfs"] = &rawRootfs
+ history, err := json.Marshal(s2.History)
+ if err != nil {
+ logrus.Infof("error encoding history information %#v: %v", s2.History, err)
+ return ""
+ }
+ rawHistory := json.RawMessage(history)
+ raw["history"] = &rawHistory
+ // Encode the result, and take the digest of that result.
+ config, err = json.Marshal(raw)
+ if err != nil {
+ logrus.Infof("error re-encoding compat image config %#v: %v", s2, err)
+ return ""
+ }
+ return digest.FromBytes(config).Hex()
+ case manifest.DockerV2ListMediaType:
+ logrus.Debugf("no image ID for manifest list")
+ // FIXME
+ case imgspecv1.MediaTypeImageIndex:
+ logrus.Debugf("no image ID for manifest index")
+ // FIXME
+ default:
+ logrus.Debugf("no image ID for unrecognized manifest type %q", manifest.GuessMIMEType(mb))
+ // FIXME
+ }
+ return ""
+}
+
+// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
+// information out of it for Inspect().
+func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
+ if info.Digest == "" {
+ return nil, errors.Errorf(`no digest supplied when reading blob`)
+ }
+ if err := info.Digest.Validate(); err != nil {
+ return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)
+ }
+ // Assume it's a file, since we're only calling this from a place that expects to read files.
+ if filename, ok := s.filenames[info.Digest]; ok {
+ contents, err2 := ioutil.ReadFile(filename)
+ if err2 != nil {
+ return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename)
+ }
+ return contents, nil
+ }
+ // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
+ return nil, errors.New("blob not found")
+}
+
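+// Commit marks the process of storing the image as successful and asks for the image to be
+// persisted: it extracts or reuses the layers listed in the manifest, creates the image
+// record, and attaches the data blobs, names, manifest, signatures, and metadata to it.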
+func (s *storageImageDestination) Commit() error {
+ // Find the list of layer blobs.
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+ }
+ man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+ if err != nil {
+ return errors.Wrapf(err, "error parsing manifest")
+ }
+ layerBlobs := man.LayerInfos()
+ // Extract or find the layers.
+ lastLayer := ""
+ addedLayers := []string{}
+ for _, blob := range layerBlobs {
+ var diff io.ReadCloser
+ // Check if there's already a layer with the ID that we'd give to the result of applying
+ // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+ diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+ if !haveDiffID {
+ // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+			// or even to check whether we had it.
+ logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+ has, _, err := s.HasBlob(blob)
+ if err != nil {
+ return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
+ }
+ if !has {
+ return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+ }
+ diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+ if !haveDiffID {
+ return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+ }
+ }
+ id := diffID.Hex()
+ if lastLayer != "" {
+ id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+ }
+ if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+ // There's already a layer that should have the right contents, just reuse it.
+ lastLayer = layer.ID
+ continue
+ }
+ // Check if we cached a file with that blobsum. If we didn't already have a layer with
+ // the blob's contents, we should have gotten a copy.
+ if filename, ok := s.filenames[blob.Digest]; ok {
+ // Use the file's contents to initialize the layer.
+ file, err2 := os.Open(filename)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error opening file %q", filename)
+ }
+ defer file.Close()
+ diff = file
+ }
+ if diff == nil {
+ // Try to find a layer with contents matching that blobsum.
+ layer := ""
+ layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ } else {
+ layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ }
+ }
+ if layer == "" {
+ return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
+ }
+ // Use the layer's contents to initialize the new layer.
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
+ }
+ defer diff.Close()
+ }
+ if diff == nil {
+ // This shouldn't have happened.
+ return errors.Errorf("error applying blob %q: content not found", blob.Digest)
+ }
+ // Build the new layer using the diff, regardless of where it came from.
+ layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff)
+ if err != nil {
+ return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
+ }
+ lastLayer = layer.ID
+ addedLayers = append([]string{lastLayer}, addedLayers...)
+ }
+ // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+ // was originally created, in case we're just copying it. If not, no harm done.
+ var options *storage.ImageOptions
+ if inspect, err := man.Inspect(s.getConfigBlob); err == nil {
+ logrus.Debugf("setting image creation date to %s", inspect.Created)
+ options = &storage.ImageOptions{
+ CreationDate: inspect.Created,
+ }
+ }
+ // Create the image record, pointing to the most-recently added layer.
+ intendedID := s.imageRef.id
+ if intendedID == "" {
+ intendedID = s.computeID(man)
+ }
+ oldNames := []string{}
+ img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
+ if err != nil {
+ if errors.Cause(err) != storage.ErrDuplicateID {
+ logrus.Debugf("error creating image: %q", err)
+ return errors.Wrapf(err, "error creating image %q", intendedID)
+ }
+ img, err = s.imageRef.transport.store.Image(intendedID)
+ if err != nil {
+ return errors.Wrapf(err, "error reading image %q", intendedID)
+ }
+ if img.TopLayer != lastLayer {
+ logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
+ return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID)
+ }
+ logrus.Debugf("reusing image ID %q", img.ID)
+ oldNames = append(oldNames, img.Names...)
+ } else {
+ logrus.Debugf("created new image ID %q", img.ID)
+ }
+ // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
+ // we just need to screen out the ones that are actually layers to get the list of non-layers.
+ dataBlobs := make(map[digest.Digest]struct{})
+ for blob := range s.filenames {
+ dataBlobs[blob] = struct{}{}
+ }
+ for _, layerBlob := range layerBlobs {
+ delete(dataBlobs, layerBlob.Digest)
+ }
+ for blob := range dataBlobs {
+ v, err := ioutil.ReadFile(s.filenames[blob])
+ if err != nil {
+ return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
+ }
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
+ return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID)
+ }
+ }
+ // Set the reference's name on the image.
+ if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
+ names := []string{}
+ if name != nil {
+ names = append(names, verboseName(name))
+ }
+ if len(oldNames) > 0 {
+ names = append(names, oldNames...)
+ }
+ if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err)
+ return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID)
+ }
+ logrus.Debugf("set names of image %q to %v", img.ID, names)
+ }
+ // Save the manifest.
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, "manifest", s.manifest); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return err
+ }
+ // Save the signatures, if we have any.
+ if len(s.signatures) > 0 {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return err
+ }
+ }
+ // Save our metadata.
+ metadata, err := json.Marshal(s)
+ if err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
+ return err
+ }
+ if len(metadata) != 0 {
+ if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
+ return err
+ }
+ logrus.Debugf("saved image metadata %q", string(metadata))
+ }
+ return nil
+}
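+
+// exampleLayerChainIDs is an unused, illustrative sketch of the layer ID scheme used by
+// Commit() above: the first layer's ID is the hex of its diffID, and each later ID is the
+// canonical digest of "<parent ID>+<diffID hex>", so the same stack of diffs always maps
+// to the same chain of layer IDs.
+func exampleLayerChainIDs(diffIDs []digest.Digest) []string {
+	ids := []string{}
+	lastLayer := ""
+	for _, diffID := range diffIDs {
+		id := diffID.Hex()
+		if lastLayer != "" {
+			id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+		}
+		ids = append(ids, id)
+		lastLayer = id
+	}
+	return ids
+}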
+
+var manifestMIMETypes = []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+}
+
+func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
+ return manifestMIMETypes
+}
+
+// PutManifest writes the manifest to the destination.
+func (s *storageImageDestination) PutManifest(manifest []byte) error {
+ s.manifest = make([]byte, len(manifest))
+ copy(s.manifest, manifest)
+ return nil
+}
+
+// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was
+// previously supplied to PutSignatures().
+func (s *storageImageDestination) SupportsSignatures() error {
+ return nil
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
+ return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (s *storageImageDestination) MustMatchRuntimeOS() bool {
+ return true
+}
+
+// PutSignatures records the image's signatures for committing as a single data blob.
+func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
+ sizes := []int{}
+ sigblob := []byte{}
+ for _, sig := range signatures {
+ sizes = append(sizes, len(sig))
+ newblob := make([]byte, len(sigblob)+len(sig))
+ copy(newblob, sigblob)
+ copy(newblob[len(sigblob):], sig)
+ sigblob = newblob
+ }
+ s.signatures = sigblob
+ s.SignatureSizes = sizes
+ return nil
+}
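+
+// exampleSplitSignatures is an unused, illustrative sketch showing how the single
+// "signatures" blob assembled by PutSignatures() can be split back into individual
+// signatures using the recorded SignatureSizes.
+func exampleSplitSignatures(sigblob []byte, sizes []int) ([][]byte, error) {
+	signatures := [][]byte{}
+	offset := 0
+	for _, size := range sizes {
+		if offset+size > len(sigblob) {
+			return nil, errors.Errorf("signature data is shorter than the recorded sizes")
+		}
+		signatures = append(signatures, sigblob[offset:offset+size])
+		offset += size
+	}
+	return signatures, nil
+}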
+
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) getSize() (int64, error) {
+ var sum int64
+ // Size up the data blobs.
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading image %q", s.ID)
+ }
+ for _, dataName := range dataNames {
+ bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID)
+ }
+ sum += bigSize
+ }
+ // Add the signature sizes.
+ for _, sigSize := range s.SignatureSizes {
+ sum += int64(sigSize)
+ }
+ // Prepare to walk the layer list.
+ img, err := s.imageRef.transport.store.Image(s.ID)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading image info %q", s.ID)
+ }
+ // Walk the layer list.
+ layerID := img.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
+ return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+ }
+ sum += layer.UncompressedSize
+ if layer.Parent == "" {
+ break
+ }
+ layerID = layer.Parent
+ }
+ return sum, nil
+}
+
+// newImage creates an image that also knows its size
+func newImage(s storageReference) (types.Image, error) {
+ src, err := newImageSource(s)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.FromSource(src)
+ if err != nil {
+ return nil, err
+ }
+ size, err := src.getSize()
+ if err != nil {
+ return nil, err
+ }
+ return &storageImage{Image: img, size: size}, nil
+}
+
+// Size() returns the previously-computed size of the image, with no error.
+func (s storageImage) Size() (int64, error) {
+ return s.size, nil
+}
diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go
new file mode 100644
index 000000000..fb8b0ccc6
--- /dev/null
+++ b/vendor/github.com/containers/image/storage/storage_reference.go
@@ -0,0 +1,178 @@
+package storage
+
+import (
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
+// value hex-encoded into a 64-character string, and a reference to a Store
+// where an image is, or would be, kept.
+type storageReference struct {
+ transport storageTransport
+ reference string
+ id string
+ name reference.Named
+ tag string
+ digest digest.Digest
+}
+
+func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference {
+ // We take a copy of the transport, which contains a pointer to the
+ // store that it used for resolving this reference, so that the
+ // transport that we'll return from Transport() won't be affected by
+ // further calls to the original transport's SetStore() method.
+ return &storageReference{
+ transport: transport,
+ reference: reference,
+ id: id,
+ name: name,
+ tag: tag,
+ digest: digest,
+ }
+}
+
+// Resolve the reference's name to an image ID in the store, if there's already
+// one present with the same name or ID, and return the image.
+func (s *storageReference) resolveImage() (*storage.Image, error) {
+ if s.id == "" {
+ image, err := s.transport.store.Image(s.reference)
+ if image != nil && err == nil {
+ s.id = image.ID
+ }
+ }
+ if s.id == "" {
+ logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
+ return nil, ErrNoSuchImage
+ }
+ img, err := s.transport.store.Image(s.id)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image %q", s.id)
+ }
+ if s.reference != "" {
+ nameMatch := false
+ for _, name := range img.Names {
+ if name == s.reference {
+ nameMatch = true
+ break
+ }
+ }
+ if !nameMatch {
+ logrus.Errorf("no image matching reference %q found", s.StringWithinTransport())
+ return nil, ErrNoSuchImage
+ }
+ }
+ return img, nil
+}
+
+// Return a Transport object that defaults to using the same store that we used
+// to build this reference object.
+func (s storageReference) Transport() types.ImageTransport {
+ return &storageTransport{
+ store: s.transport.store,
+ defaultUIDMap: s.transport.defaultUIDMap,
+ defaultGIDMap: s.transport.defaultGIDMap,
+ }
+}
+
+// Return a name with a tag or digest, if we have either, else return it bare.
+func (s storageReference) DockerReference() reference.Named {
+ if s.name == nil {
+ return nil
+ }
+ if s.tag != "" {
+ if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil {
+ return namedTagged
+ }
+ }
+ if s.digest != "" {
+ if canonical, err := reference.WithDigest(s.name, s.digest); err == nil {
+ return canonical
+ }
+ }
+ return s.name
+}
+
+// Return a name with a tag, prefixed with the graph root and driver name, to
+// disambiguate between images which may be present in multiple stores and
+// share only their names.
+func (s storageReference) StringWithinTransport() string {
+ optionsList := ""
+ options := s.transport.store.GraphOptions()
+ if len(options) > 0 {
+ optionsList = ":" + strings.Join(options, ",")
+ }
+ storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
+ if s.reference == "" {
+ return storeSpec + "@" + s.id
+ }
+ if s.id == "" {
+ return storeSpec + s.reference
+ }
+ return storeSpec + s.reference + "@" + s.id
+}
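+
+// For illustration, with an overlay store rooted at /var/lib/containers/storage (all
+// paths hypothetical), the three cases above would produce:
+//   [overlay@/var/lib/containers/storage+/var/run/containers/storage]@<id>
+//   [overlay@/var/lib/containers/storage+/var/run/containers/storage]docker.io/library/busybox:latest
+//   [overlay@/var/lib/containers/storage+/var/run/containers/storage]docker.io/library/busybox:latest@<id>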
+
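+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for
+// policy lookup, built from the graph driver name, the graph root, and the reference's name
+// and/or ID.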
+func (s storageReference) PolicyConfigurationIdentity() string {
+ storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
+ if s.name == nil {
+ return storeSpec + "@" + s.id
+ }
+ if s.id == "" {
+ return storeSpec + s.reference
+ }
+ return storeSpec + s.reference + "@" + s.id
+}
+
+// Also accept policy that's tied to the combination of the graph root and
+// driver name, to apply to all images stored in the Store, and to just the
+// graph root, in case we're using multiple drivers in the same directory for
+// some reason.
+func (s storageReference) PolicyConfigurationNamespaces() []string {
+ storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
+ driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
+ namespaces := []string{}
+ if s.name != nil {
+ name := reference.TrimNamed(s.name)
+ components := strings.Split(name.String(), "/")
+ for len(components) > 0 {
+ namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
+ components = components[:len(components)-1]
+ }
+ }
+ namespaces = append(namespaces, storeSpec)
+ namespaces = append(namespaces, driverlessStoreSpec)
+ return namespaces
+}
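+
+// For illustration, a reference to docker.io/library/busybox in an overlay store rooted at
+// /var/lib/containers/storage (paths hypothetical) would yield:
+//   [overlay@/var/lib/containers/storage]docker.io/library/busybox
+//   [overlay@/var/lib/containers/storage]docker.io/library
+//   [overlay@/var/lib/containers/storage]docker.io
+//   [overlay@/var/lib/containers/storage]
+//   [/var/lib/containers/storage]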
+
+func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ return newImage(s)
+}
+
+func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
+ img, err := s.resolveImage()
+ if err != nil {
+ return err
+ }
+ layers, err := s.transport.store.DeleteImage(img.ID, true)
+ if err == nil {
+ logrus.Debugf("deleted image %q", img.ID)
+ for _, layer := range layers {
+ logrus.Debugf("deleted layer %q", layer)
+ }
+ }
+ return err
+}
+
+func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(s)
+}
+
+func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(s)
+}
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
new file mode 100644
index 000000000..39c81a581
--- /dev/null
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -0,0 +1,442 @@
+package storage
+
+import (
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ minimumTruncatedIDLength = 3
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+var (
+ // Transport is an ImageTransport that uses either a default
+	// storage.Store or one that it's explicitly told to use.
+ Transport StoreTransport = &storageTransport{}
+ // ErrInvalidReference is returned when ParseReference() is passed an
+ // empty reference.
+ ErrInvalidReference = errors.New("invalid reference")
+ // ErrPathNotAbsolute is returned when a graph root is not an absolute
+ // path name.
+ ErrPathNotAbsolute = errors.New("path name is not absolute")
+)
+
+// StoreTransport is an ImageTransport that uses a storage.Store to parse
+// references, either its own default or one that it's told to use.
+type StoreTransport interface {
+ types.ImageTransport
+ // SetStore sets the default store for this transport.
+ SetStore(storage.Store)
+ // GetImage retrieves the image from the transport's store that's named
+ // by the reference.
+ GetImage(types.ImageReference) (*storage.Image, error)
+ // GetStoreImage retrieves the image from a specified store that's named
+ // by the reference.
+ GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
+ // ParseStoreReference parses a reference, overriding any store
+ // specification that it may contain.
+ ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
+ // SetDefaultUIDMap sets the default UID map to use when opening stores.
+ SetDefaultUIDMap(idmap []idtools.IDMap)
+ // SetDefaultGIDMap sets the default GID map to use when opening stores.
+ SetDefaultGIDMap(idmap []idtools.IDMap)
+ // DefaultUIDMap returns the default UID map used when opening stores.
+ DefaultUIDMap() []idtools.IDMap
+ // DefaultGIDMap returns the default GID map used when opening stores.
+ DefaultGIDMap() []idtools.IDMap
+}
+
+type storageTransport struct {
+ store storage.Store
+ defaultUIDMap []idtools.IDMap
+ defaultGIDMap []idtools.IDMap
+}
+
+func (s *storageTransport) Name() string {
+ // Still haven't really settled on a name.
+ return "containers-storage"
+}
+
+// SetStore sets the Store object which the Transport will use for parsing
+// references when information about a Store is not directly specified as part
+// of the reference. If one is not set, the library will attempt to initialize
+// one with default settings when a reference needs to be parsed. Calling
+// SetStore does not affect previously parsed references.
+func (s *storageTransport) SetStore(store storage.Store) {
+ s.store = store
+}
+
+// SetDefaultUIDMap sets the default UID map to use when opening stores.
+func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) {
+ s.defaultUIDMap = idmap
+}
+
+// SetDefaultGIDMap sets the default GID map to use when opening stores.
+func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) {
+ s.defaultGIDMap = idmap
+}
+
+// DefaultUIDMap returns the default UID map used when opening stores.
+func (s *storageTransport) DefaultUIDMap() []idtools.IDMap {
+ return s.defaultUIDMap
+}
+
+// DefaultGIDMap returns the default GID map used when opening stores.
+func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
+ return s.defaultGIDMap
+}
+
+// ParseStoreReference takes a name or an ID, tries to figure out which it is
+// relative to the given store, and returns it in a reference object.
+func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
+ var name reference.Named
+ if ref == "" {
+		return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
+ }
+ if ref[0] == '[' {
+ // Ignore the store specifier.
+ closeIndex := strings.IndexRune(ref, ']')
+ if closeIndex < 1 {
+ return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
+ }
+ ref = ref[closeIndex+1:]
+ }
+
+ // The last segment, if there's more than one, is either a digest from a reference, or an image ID.
+ split := strings.LastIndex(ref, "@")
+ idOrDigest := ""
+ if split != -1 {
+ // Peel off that last bit so that we can work on the rest.
+ idOrDigest = ref[split+1:]
+ if idOrDigest == "" {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
+ }
+ ref = ref[:split]
+ }
+
+ // The middle segment (now the last segment), if there is one, is a digest.
+ split = strings.LastIndex(ref, "@")
+ sum := digest.Digest("")
+ if split != -1 {
+ sum = digest.Digest(ref[split+1:])
+ if sum == "" {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
+ }
+ ref = ref[:split]
+ }
+
+	// If we found a digest in the middle, the final part can only be an image ID; validate the
+	// ID, and then validate the digest itself.
+ id := ""
+ if sum != "" {
+ if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest)
+ }
+ if err := sum.Validate(); err != nil {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
+ }
+ id = idOrDigest
+ if img, err := store.Image(idOrDigest); err == nil && img != nil && len(id) >= minimumTruncatedIDLength {
+ // The ID is a truncated version of the ID of an image that's present in local storage,
+ // so we might as well use the expanded value.
+ id = img.ID
+ }
+ } else if idOrDigest != "" {
+ // There was no middle portion, so the final portion could be either a digest or an ID.
+ if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil {
+ // It's an ID.
+ id = idOrDigest
+ } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil {
+ // It's a digest.
+ sum = idSum
+ } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength {
+ // It's a truncated version of the ID of an image that's present in local storage,
+ // and we may need the expanded value.
+ id = img.ID
+ } else {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
+ }
+ }
+
+	// If we only had one portion, then _maybe_ it's a truncated image ID. Only check for that if it's
+	// at least what we guess is a reasonable minimum length, because we don't want a really short value
+	// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
+	if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" {
+		if img, err := store.Image(ref); err == nil && img != nil {
+ // It's a truncated version of the ID of an image that's present in local storage;
+ // we need to expand it.
+ id = img.ID
+ ref = ""
+ }
+ }
+
+ // The initial portion is probably a name, possibly with a tag.
+ if ref != "" {
+ var err error
+ if name, err = reference.ParseNormalizedNamed(ref); err != nil {
+ return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
+ }
+ }
+ if name == nil && sum == "" && id == "" {
+ return nil, errors.Errorf("error parsing reference")
+ }
+
+ // Construct a copy of the store spec.
+ optionsList := ""
+ options := store.GraphOptions()
+ if len(options) > 0 {
+ optionsList = ":" + strings.Join(options, ",")
+ }
+ storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"
+
+ // Convert the name back into a reference string, if we got a name.
+ refname := ""
+ tag := ""
+ if name != nil {
+ if sum.Validate() == nil {
+ canonical, err := reference.WithDigest(name, sum)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
+ }
+ refname = verboseName(canonical)
+ } else {
+ name = reference.TagNameOnly(name)
+ tagged, ok := name.(reference.Tagged)
+ if !ok {
+ return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
+ }
+ refname = verboseName(name)
+ tag = tagged.Tag()
+ }
+ }
+ if refname == "" {
+ logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
+ } else if id == "" {
+ logrus.Debugf("parsed reference into %q", storeSpec+refname)
+ } else {
+ logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
+ }
+ return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
+}
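+
+// For illustration (all concrete values hypothetical), ParseStoreReference maps inputs
+// roughly as follows:
+//   "busybox"                 -> name docker.io/library/busybox, tag "latest"
+//   "busybox:1.28"            -> name docker.io/library/busybox, tag "1.28"
+//   "@<64-hex-digit ID>"      -> no name, just an image ID
+//   "busybox@sha256:<digest>" -> the name pinned to a digest instead of a tag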
+
+func (s *storageTransport) GetStore() (storage.Store, error) {
+ // Return the transport's previously-set store. If we don't have one
+ // of those, initialize one now.
+ if s.store == nil {
+ options := storage.DefaultStoreOptions
+ options.UIDMap = s.defaultUIDMap
+ options.GIDMap = s.defaultGIDMap
+ store, err := storage.GetStore(options)
+ if err != nil {
+ return nil, err
+ }
+ s.store = store
+ }
+ return s.store, nil
+}
+
+// ParseReference takes a name and a tag or digest and/or ID
+// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
+// possibly prefixed with a store specifier in the form "[_graphroot_]" or
+// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
+// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
+// tries to figure out which it is, and returns it in a reference object.
+// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
+// even be specified as if it were a _name_.
+func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
+ var store storage.Store
+ // Check if there's a store location prefix. If there is, then it
+ // needs to match a store that was previously initialized using
+ // storage.GetStore(), or be enough to let the storage library fill out
+ // the rest using knowledge that it has from elsewhere.
+ if reference[0] == '[' {
+ closeIndex := strings.IndexRune(reference, ']')
+ if closeIndex < 1 {
+ return nil, ErrInvalidReference
+ }
+ storeSpec := reference[1:closeIndex]
+ reference = reference[closeIndex+1:]
+ // Peel off a "driver@" from the start.
+ driverInfo := ""
+ driverSplit := strings.SplitN(storeSpec, "@", 2)
+ if len(driverSplit) != 2 {
+ if storeSpec == "" {
+ return nil, ErrInvalidReference
+ }
+ } else {
+ driverInfo = driverSplit[0]
+ if driverInfo == "" {
+ return nil, ErrInvalidReference
+ }
+ storeSpec = driverSplit[1]
+ if storeSpec == "" {
+ return nil, ErrInvalidReference
+ }
+ }
+ // Peel off a ":options" from the end.
+ var options []string
+ optionsSplit := strings.SplitN(storeSpec, ":", 2)
+ if len(optionsSplit) == 2 {
+ options = strings.Split(optionsSplit[1], ",")
+ storeSpec = optionsSplit[0]
+ }
+ // Peel off a "+runroot" from the new end.
+ runRootInfo := ""
+ runRootSplit := strings.SplitN(storeSpec, "+", 2)
+ if len(runRootSplit) == 2 {
+ runRootInfo = runRootSplit[1]
+ storeSpec = runRootSplit[0]
+ }
+ // The rest is our graph root.
+ rootInfo := storeSpec
+ // Check that any paths are absolute paths.
+ if rootInfo != "" && !filepath.IsAbs(rootInfo) {
+ return nil, ErrPathNotAbsolute
+ }
+ if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
+ return nil, ErrPathNotAbsolute
+ }
+ store2, err := storage.GetStore(storage.StoreOptions{
+ GraphDriverName: driverInfo,
+ GraphRoot: rootInfo,
+ RunRoot: runRootInfo,
+ GraphDriverOptions: options,
+ UIDMap: s.defaultUIDMap,
+ GIDMap: s.defaultGIDMap,
+ })
+ if err != nil {
+ return nil, err
+ }
+ store = store2
+ } else {
+ // We didn't have a store spec, so use the default.
+ store2, err := s.GetStore()
+ if err != nil {
+ return nil, err
+ }
+ store = store2
+ }
+ return s.ParseStoreReference(store, reference)
+}
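+
+// For illustration (paths and options hypothetical), a fully-specified reference accepted
+// by ParseReference could look like:
+//   [overlay@/var/lib/containers/storage+/var/run/containers/storage:overlay.mountopt=nodev]docker.io/library/busybox:latest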
+
+func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
+ dref := ref.DockerReference()
+ if dref == nil {
+ if sref, ok := ref.(*storageReference); ok {
+ if sref.id != "" {
+ if img, err := store.Image(sref.id); err == nil {
+ return img, nil
+ }
+ }
+ }
+ return nil, ErrInvalidReference
+ }
+ return store.Image(verboseName(dref))
+}
+
+func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
+ store, err := s.GetStore()
+ if err != nil {
+ return nil, err
+ }
+ return s.GetStoreImage(store, ref)
+}
+
+func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // Check that there's a store location prefix. Values we're passed are
+ // expected to come from PolicyConfigurationIdentity or
+ // PolicyConfigurationNamespaces, so if there's no store location,
+ // something's wrong.
+ if scope[0] != '[' {
+ return ErrInvalidReference
+ }
+ // Parse the store location prefix.
+ closeIndex := strings.IndexRune(scope, ']')
+ if closeIndex < 1 {
+ return ErrInvalidReference
+ }
+ storeSpec := scope[1:closeIndex]
+ scope = scope[closeIndex+1:]
+ storeInfo := strings.SplitN(storeSpec, "@", 2)
+ if len(storeInfo) == 1 && storeInfo[0] != "" {
+ // One component: the graph root.
+ if !filepath.IsAbs(storeInfo[0]) {
+ return ErrPathNotAbsolute
+ }
+ } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
+ // Two components: the driver type and the graph root.
+ if !filepath.IsAbs(storeInfo[1]) {
+ return ErrPathNotAbsolute
+ }
+ } else {
+ // Anything else: scope specified in a form we don't
+ // recognize.
+ return ErrInvalidReference
+ }
+ // That might be all of it, and that's okay.
+ if scope == "" {
+ return nil
+ }
+ // But if there is anything left, it has to be a name, with or without
+ // a tag, with or without an ID, since we don't return namespace values
+ // that are just bare IDs.
+ scopeInfo := strings.SplitN(scope, "@", 2)
+ if len(scopeInfo) == 1 && scopeInfo[0] != "" {
+ _, err := reference.ParseNormalizedNamed(scopeInfo[0])
+ if err != nil {
+ return err
+ }
+ } else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" {
+ _, err := reference.ParseNormalizedNamed(scopeInfo[0])
+ if err != nil {
+ return err
+ }
+ _, err = digest.Parse("sha256:" + scopeInfo[1])
+ if err != nil {
+ return err
+ }
+ } else {
+ return ErrInvalidReference
+ }
+ return nil
+}
+
+func verboseName(r reference.Reference) string {
+ if r == nil {
+ return ""
+ }
+ named, isNamed := r.(reference.Named)
+ digested, isDigested := r.(reference.Digested)
+ tagged, isTagged := r.(reference.Tagged)
+ name := ""
+ tag := ""
+ sum := ""
+ if isNamed {
+ name = (reference.TrimNamed(named)).String()
+ }
+ if isTagged {
+ if tagged.Tag() != "" {
+ tag = ":" + tagged.Tag()
+ }
+ }
+ if isDigested {
+ if digested.Digest().Validate() == nil {
+ sum = "@" + digested.Digest().String()
+ }
+ }
+ return name + tag + sum
+}
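+
+// For illustration, a name carrying both a tag and a digest would be rendered as
+// "docker.io/library/busybox:latest@sha256:<digest>" (all values hypothetical).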
diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go
new file mode 100644
index 000000000..4279b9d23
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/alltransports/alltransports.go
@@ -0,0 +1,34 @@
+package alltransports
+
+import (
+ "strings"
+
+ // register all known transports
+ // NOTE: Make sure docs/policy.json.md is updated when adding or updating
+ // a transport.
+ _ "github.com/containers/image/directory"
+ _ "github.com/containers/image/docker"
+ _ "github.com/containers/image/docker/archive"
+ _ "github.com/containers/image/docker/daemon"
+ _ "github.com/containers/image/oci/archive"
+ _ "github.com/containers/image/oci/layout"
+ _ "github.com/containers/image/openshift"
+ // The ostree transport is registered by ostree*.go
+ _ "github.com/containers/image/storage"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// ParseImageName converts a URL-like image name to a types.ImageReference.
+func ParseImageName(imgName string) (types.ImageReference, error) {
+ parts := strings.SplitN(imgName, ":", 2)
+ if len(parts) != 2 {
+ return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+ }
+ transport := transports.Get(parts[0])
+ if transport == nil {
+ return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
+ }
+ return transport.ParseReference(parts[1])
+}
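+
+// exampleParseImageName is an unused, illustrative sketch: the transport name is everything
+// before the first colon, and the remainder is handed to that transport's own parser. The
+// image name used here is hypothetical.
+func exampleParseImageName() (types.ImageReference, error) {
+	// "containers-storage" is looked up via transports.Get(), and the storage transport
+	// then parses "docker.io/library/busybox:latest".
+	return ParseImageName("containers-storage:docker.io/library/busybox:latest")
+}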
diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/transports/alltransports/ostree.go
new file mode 100644
index 000000000..0fc5d7ef7
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/alltransports/ostree.go
@@ -0,0 +1,8 @@
+// +build !containers_image_ostree_stub
+
+package alltransports
+
+import (
+ // Register the ostree transport
+ _ "github.com/containers/image/ostree"
+)
diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go
new file mode 100644
index 000000000..8b01afe7c
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go
@@ -0,0 +1,9 @@
+// +build containers_image_ostree_stub
+
+package alltransports
+
+import "github.com/containers/image/transports"
+
+func init() {
+ transports.Register(transports.NewStubTransport("ostree"))
+}
diff --git a/vendor/github.com/containers/image/transports/stub.go b/vendor/github.com/containers/image/transports/stub.go
new file mode 100644
index 000000000..087f69b6e
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/stub.go
@@ -0,0 +1,36 @@
+package transports
+
+import (
+ "fmt"
+
+ "github.com/containers/image/types"
+)
+
+// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+type stubTransport string
+
+// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+func NewStubTransport(name string) types.ImageTransport {
+ return stubTransport(name)
+}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (s stubTransport) Name() string {
+ return string(s)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // Allowing any reference in here allows tools with some transports stubbed-out to still
+ // use signature verification policies which refer to these stubbed-out transports.
+ // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON .
+ return nil
+}
diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go
new file mode 100644
index 000000000..687d0a44e
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/transports.go
@@ -0,0 +1,90 @@
+package transports
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/containers/image/types"
+)
+
+// knownTransports is a registry of known ImageTransport instances.
+type knownTransports struct {
+ transports map[string]types.ImageTransport
+ mu sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+ kt.mu.Lock()
+ t := kt.transports[k]
+ kt.mu.Unlock()
+ return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+ kt.mu.Lock()
+ delete(kt.transports, k)
+ kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ name := t.Name()
+	if _, present := kt.transports[name]; present {
+		panic(fmt.Sprintf("Duplicate image transport name %s", name))
+	}
+ kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+ kt = &knownTransports{
+ transports: make(map[string]types.ImageTransport),
+ }
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+ return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+ kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+ kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
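+
+// For example (reference value hypothetical), an image in local storage might be rendered
+// as "containers-storage:docker.io/library/busybox:latest", which alltransports.ParseImageName
+// can turn back into an equivalent reference.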
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ deprecated := map[string]bool{
+ "atomic": true,
+ }
+ var names []string
+ for _, transport := range kt.transports {
+ if !deprecated[transport.Name()] {
+ names = append(names, transport.Name())
+ }
+ }
+ sort.Strings(names)
+ return names
+}
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
new file mode 100644
index 000000000..bae7319ac
--- /dev/null
+++ b/vendor/github.com/containers/image/types/types.go
@@ -0,0 +1,354 @@
+package types
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should (apart from versions of the image format) be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also transports.KnownTransports.
+type ImageTransport interface {
+ // Name returns the name of the transport, which must be unique among other transports.
+ Name() string
+ // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+ ParseReference(reference string) (ImageReference, error)
+ // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+ // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+	// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+ // scope passed to this function will not be "", that value is always allowed.
+ ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+//
+// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
+// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
+// That's also why the various identification/formatting methods of this type do not support returning errors.
+//
+// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside
+// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on.
+type ImageReference interface {
+ Transport() ImageTransport
+ // StringWithinTransport returns a string representation of the reference, which MUST be such that
+ // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+ // NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+ // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+ // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+ // instead, see transports.ImageName().
+ StringWithinTransport() string
+
+ // DockerReference returns a Docker reference associated with this reference
+ // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+ // not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+ DockerReference() reference.Named
+
+ // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+ // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+ // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+ // (i.e. various references with exactly the same semantics should return the same configuration identity)
+ // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+ // not required/guaranteed that it will be a valid input to Transport().ParseReference().
+ // Returns "" if configuration identities for these references are not supported.
+ PolicyConfigurationIdentity() string
+
+ // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+ // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+ // in order, terminating on first match, and an implicit "" is always checked at the end.
+ // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+ // and each following element to be a prefix of the element preceding it.
+ PolicyConfigurationNamespaces() []string
+
+ // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+ // The caller must call .Close() on the returned Image.
+ // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+ // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+ NewImage(ctx *SystemContext) (Image, error)
+ // NewImageSource returns a types.ImageSource for this reference.
+ // The caller must call .Close() on the returned ImageSource.
+ NewImageSource(ctx *SystemContext) (ImageSource, error)
+ // NewImageDestination returns a types.ImageDestination for this reference.
+ // The caller must call .Close() on the returned ImageDestination.
+ NewImageDestination(ctx *SystemContext) (ImageDestination, error)
+
+ // DeleteImage deletes the named image from the registry, if supported.
+ DeleteImage(ctx *SystemContext) error
+}
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+ Digest digest.Digest // "" if unknown.
+ Size int64 // -1 if unknown
+ URLs []string
+ Annotations map[string]string
+ MediaType string
+}
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image.
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
+//
+// WARNING: Various methods which return an object identified by digest generally do not
+// validate that the returned data actually matches that digest; this is the caller’s responsibility.
+type ImageSource interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageSource, if any.
+ Close() error
+ // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+ // It may use a remote (= slow) service.
+ GetManifest() ([]byte, string, error)
+ // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
+ // out of a manifest list.
+ GetTargetManifest(digest digest.Digest) ([]byte, string, error)
+ // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+	// The Digest field in BlobInfo is guaranteed to be provided; Size may be -1, and MediaType may optionally be provided.
+ GetBlob(BlobInfo) (io.ReadCloser, int64, error)
+ // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+ GetSignatures(context.Context) ([][]byte, error)
+ // UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest.
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ UpdatedLayerInfos() []BlobInfo
+}
+
+// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
+//
+// There is a specific required order for some of the calls:
+// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
+// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
+// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
+// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+ // Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+ // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageDestination, if any.
+ Close() error
+
+ // SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+ // If an empty slice or nil is returned, any MIME type may be tried for upload.
+ SupportedManifestMIMETypes() []string
+ // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+ // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+ SupportsSignatures() error
+ // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+ ShouldCompressLayers() bool
+ // AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually
+ // be uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs() bool
+ // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS.
+ MustMatchRuntimeOS() bool
+ // PutBlob writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+ PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error)
+ // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+ // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+ // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+ // it returns a non-nil error only on an unexpected failure.
+ HasBlob(info BlobInfo) (bool, int64, error)
+ // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned
+ // true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike
+ // PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that
+ // the changes it describes need to be applied again when composing a filesystem tree.
+ ReapplyBlob(info BlobInfo) (BlobInfo, error)
+ // PutManifest writes manifest to the destination.
+ // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+ // If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+ // and may accept a different manifest type instead, the returned error must be a ManifestTypeRejectedError.
+ PutManifest(manifest []byte) error
+ PutSignatures(signatures [][]byte) error
+ // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+ // WARNING: This does not have any transactional semantics:
+ // - Uploaded data MAY be visible to others before Commit() is called
+ // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+ Commit() error
+}
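
The ordering contract above, spelled out as a minimal sketch; pushImage's inputs (the layer list, an open callback, the raw manifest, and the signatures) are assumed to come from the caller, and real code would also handle compression and manifest edits:

    import (
        "io"

        "github.com/containers/image/types"
    )

    func pushImage(dest types.ImageDestination, layers []types.BlobInfo, open func(types.BlobInfo) (io.Reader, error), rawManifest []byte, sigs [][]byte) error {
        for _, info := range layers {
            present, _, err := dest.HasBlob(info)
            if err != nil {
                return err
            }
            if present {
                if _, err := dest.ReapplyBlob(info); err != nil { // only legal after HasBlob returned true
                    return err
                }
                continue
            }
            stream, err := open(info)
            if err != nil {
                return err
            }
            if _, err := dest.PutBlob(stream, info); err != nil { // all blobs go in before PutManifest
                return err
            }
        }
        if err := dest.PutManifest(rawManifest); err != nil {
            return err
        }
        if err := dest.PutSignatures(sigs); err != nil { // only after PutManifest
            return err
        }
        return dest.Commit() // nothing is guaranteed to persist without this
    }
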
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+ Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+ return e.Err.Error()
+}
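
The struct exists precisely to make a type assertion possible, so a fallback path can look like this sketch; convert is an assumed caller-supplied helper, not a library function:

    import "github.com/containers/image/types"

    func putWithFallback(dest types.ImageDestination, manifest []byte, convert func([]byte, string) ([]byte, error)) error {
        err := dest.PutManifest(manifest)
        if _, rejected := err.(types.ManifestTypeRejectedError); !rejected {
            return err // nil on success, or a failure a different manifest type won't fix
        }
        for _, mime := range dest.SupportedManifestMIMETypes() {
            converted, cerr := convert(manifest, mime)
            if cerr != nil {
                continue
            }
            if dest.PutManifest(converted) == nil {
                return nil
            }
        }
        return err
    }
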
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+// Each UnparsedImage should eventually be closed by calling Close().
+type UnparsedImage interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized UnparsedImage, if any.
+ Close() error
+ // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+ Manifest() ([]byte, string, error)
+ // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+ Signatures(ctx context.Context) ([][]byte, error)
+ // UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest.
+ // The Digest field is guaranteed to be provided; Size may be -1 and MediaType may be left empty.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ UpdatedLayerInfos() []BlobInfo
+}
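
A sketch of the signatures-first flow this type enables; checkSignatures stands in for real policy enforcement (which lives elsewhere) and is an assumed helper:

    import (
        "context"

        "github.com/containers/image/types"
    )

    func verifiedManifest(unparsed types.UnparsedImage, checkSignatures func([][]byte) error) ([]byte, error) {
        sigs, err := unparsed.Signatures(context.Background())
        if err != nil {
            return nil, err
        }
        if err := checkSignatures(sigs); err != nil { // reject before parsing anything else
            return nil, err
        }
        manifest, _, err := unparsed.Manifest() // cached, so repeated calls are cheap
        return manifest, err
    }
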
+
+// Image is the primary API for inspecting properties of images.
+// Each Image should eventually be closed by calling Close().
+type Image interface {
+ // Note that Reference may return nil in the return value of UpdatedImage!
+ UnparsedImage
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob() ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+ // layers in the resulting configuration isn't guaranteed to be returned due to how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig() (*v1.Image, error)
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1 and MediaType may be left empty.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []BlobInfo
+ // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+ Inspect() (*ImageInspectInfo, error)
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+ // This does not change the state of the original Image object.
+ UpdatedImage(options ManifestUpdateOptions) (Image, error)
+ // IsMultiImage returns true if the image's manifest is a list of images, false otherwise.
+ IsMultiImage() bool
+ // Size returns an approximation of the amount of disk space which is consumed by the image in its current
+ // location. If the size is not known, -1 will be returned.
+ Size() (int64, error)
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
+type ManifestUpdateOptions struct {
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
+ EmbeddedDockerReference reference.Named
+ ManifestMIMEType string
+ // The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+ InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+ Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+ LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
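
Tying the two structs together, a sketch of requesting only a manifest MIME-type change; newType, uploaded, and diffIDs are assumed inputs, and per the comments above everything in InformationOnly should be provided:

    import (
        "github.com/containers/image/types"
        digest "github.com/opencontainers/go-digest"
    )

    func convertManifestType(img types.Image, dest types.ImageDestination, newType string, uploaded []types.BlobInfo, diffIDs []digest.Digest) (types.Image, error) {
        options := types.ManifestUpdateOptions{
            ManifestMIMEType: newType, // the only modification requested here
            InformationOnly: types.ManifestUpdateInformation{
                Destination:  dest,
                LayerInfos:   uploaded,
                LayerDiffIDs: diffIDs,
            },
        }
        return img.UpdatedImage(options) // the original img is left unchanged
    }
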
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+ Tag string
+ Created time.Time
+ DockerVersion string
+ Labels map[string]string
+ Architecture string
+ Os string
+ Layers []string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+type DockerAuthConfig struct {
+ Username string
+ Password string
+}
+
+// SystemContext allows parametrizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics is exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+ // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+ // Not used for any of the more specific path overrides available in this struct.
+ // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+ // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement,
+ // just set RootForImplicitAbsolutePaths and there is no need to worry about the environment).
+ // NOTE: This does NOT affect paths starting by $HOME.
+ RootForImplicitAbsolutePaths string
+
+ // === Global configuration overrides ===
+ // If not "", overrides the system's default path for signature.Policy configuration.
+ SignaturePolicyPath string
+ // If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+ RegistriesDirPath string
+ // Path to the system-wide registries configuration file
+ SystemRegistriesConfPath string
+ // If not "", overrides the default path for the authentication file
+ AuthFilePath string
+
+ // === OCI.Transport overrides ===
+ // If not "", a directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when downloading OCI image layers.
+ OCICertPath string
+ // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ OCIInsecureSkipTLSVerify bool
+
+ // === docker.Transport overrides ===
+ // If not "", a directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when talking to a Docker Registry.
+ DockerCertPath string
+ // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+ // Ignored if DockerCertPath is non-empty.
+ DockerPerHostCertDirPath string
+ DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ // If nil, the library tries to parse ~/.docker/config.json to retrieve credentials
+ DockerAuthConfig *DockerAuthConfig
+ // if not "", an User-Agent header is added to each request when contacting a registry.
+ DockerRegistryUserAgent string
+ // If true, a V1 ping attempt (which is normally made to give users a better error) isn't done. Default is false.
+ // Note that this field is used mainly to integrate containers/image into projectatomic/docker
+ // in order not to break any of Docker's existing integration tests.
+ DockerDisableV1Ping bool
+ // Directory to use for OSTree temporary files
+ OSTreeTmpDirPath string
+}
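
A sketch of a caller-built SystemContext with a few of the overrides above; the paths and user agent are placeholders, and passing nil instead remains valid everywhere:

    import "github.com/containers/image/types"

    func exampleContext() *types.SystemContext {
        return &types.SystemContext{
            SignaturePolicyPath:     "/tmp/policy.json",                  // placeholder path
            DockerCertPath:          "/tmp/certs.d/registry.example.com", // placeholder path
            DockerRegistryUserAgent: "example-tool/0.1",                  // placeholder value
        }
    }
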
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+ Artifact BlobInfo
+ Offset uint64
+}
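
A sketch of a monitor draining ProgressProperties values from a channel; how the channel gets wired to the producer (in this codebase, the copy logic) is assumed here:

    import (
        "fmt"

        "github.com/containers/image/types"
    )

    func reportProgress(updates <-chan types.ProgressProperties) {
        for p := range updates {
            fmt.Printf("blob %s: %d bytes so far\n", p.Artifact.Digest, p.Offset)
        }
    }
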
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
new file mode 100644
index 000000000..3263f5800
--- /dev/null
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -0,0 +1,40 @@
+github.com/sirupsen/logrus v1.0.0
+github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5
+github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
+github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
+github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
+github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
+github.com/gorilla/context 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
+github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
+github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
+github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
+github.com/mattn/go-shellwords 005a0944d84452842197c2108bd9168ced206f78
+github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
+github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
+github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
+github.com/opencontainers/image-spec v1.0.0
+github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
+github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
+github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302
+github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
+github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
+github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
+golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
+golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
+golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
+gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
+gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
+k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
+github.com/xeipuuv/gojsonschema master
+github.com/xeipuuv/gojsonreference master
+github.com/xeipuuv/gojsonpointer master
+github.com/tchap/go-patricia v2.2.6
+github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
+github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
+github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
+github.com/pquerna/ffjson master
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
new file mode 100644
index 000000000..6644bcff3
--- /dev/null
+++ b/vendor/github.com/containers/image/version/version.go
@@ -0,0 +1,18 @@
+package version
+
+import "fmt"
+
+const (
+ // VersionMajor is for incompatible API changes
+ VersionMajor = 0
+ // VersionMinor is for functionality added in a backwards-compatible manner
+ VersionMinor = 1
+ // VersionPatch is for backwards-compatible bug fixes
+ VersionPatch = 0
+
+ // VersionDev indicates a development branch. Releases will have an empty string here.
+ VersionDev = "-dev"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE
new file mode 100644
index 000000000..8f3fee627
--- /dev/null
+++ b/vendor/github.com/containers/storage/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE
new file mode 100644
index 000000000..8a37c1c7b
--- /dev/null
+++ b/vendor/github.com/containers/storage/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2016 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md
new file mode 100644
index 000000000..00a47fd98
--- /dev/null
+++ b/vendor/github.com/containers/storage/README.md
@@ -0,0 +1,43 @@
+`storage` is a Go library which aims to provide methods for storing filesystem
+layers, container images, and containers. A `containers-storage` CLI wrapper
+is also included for manual and scripting use.
+
+To build the CLI wrapper, use 'make build-binary'.
+
+Operations which use VMs expect to launch them using 'vagrant', defaulting to
+using its 'libvirt' provider. The boxes used are also available for the
+'virtualbox' provider, and can be selected by setting $VAGRANT_PROVIDER to
+'virtualbox' before kicking off the build.
+
+The library manages three types of items: layers, images, and containers.
+
+A *layer* is a copy-on-write filesystem which is notionally stored as a set of
+changes relative to its *parent* layer, if it has one. A given layer can only
+have one parent, but any layer can be the parent of multiple layers. Layers
+which are parents of other layers should be treated as read-only.
+
+An *image* is a reference to a particular layer (its _top_ layer), along with
+other information which the library can manage for the convenience of its
+caller. This information typically includes configuration templates for
+running a binary contained within the image's layers, and may include
+cryptographic signatures. Multiple images can reference the same layer, as the
+differences between two images may not be in their layer contents.
+
+A *container* is a read-write layer which is a child of an image's top layer,
+along with information which the library can manage for the convenience of its
+caller. This information typically includes configuration information for
+running the specific container. Multiple containers can be derived from a
+single image.
+
+Layers, images, and containers are represented primarily by 32 character
+hexadecimal IDs, but items of each kind can also have one or more arbitrary
+names attached to them, which the library will automatically resolve to IDs
+when they are passed in to API calls which expect IDs.
+
+The library can store what it calls *metadata* for each of these types of
+items. This is expected to be a small piece of data, since it is cached in
+memory and stored along with the library's own bookkeeping information.
+
+Additionally, the library can store one or more of what it calls *big data* for
+images and containers. This is a named chunk of larger data, which is only in
+memory when it is being read from or being written to its own disk file.
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
new file mode 100644
index 000000000..5631e31c3
--- /dev/null
+++ b/vendor/github.com/containers/storage/containers.go
@@ -0,0 +1,531 @@
+package storage
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/containers/storage/pkg/truncindex"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// A Container is a reference to a read-write layer with metadata.
+type Container struct {
+ // ID is either one which was specified at create-time, or a random
+ // value which was generated by the library.
+ ID string `json:"id"`
+
+ // Names is an optional set of user-defined convenience values. The
+ // container can be referred to by its ID or any of its names. Names
+ // are unique among containers.
+ Names []string `json:"names,omitempty"`
+
+ // ImageID is the ID of the image which was used to create the container.
+ ImageID string `json:"image"`
+
+ // LayerID is the ID of the read-write layer for the container itself.
+ // It is assumed that the image's top layer is the parent of the container's
+ // read-write layer.
+ LayerID string `json:"layer"`
+
+ // Metadata is data we keep for the convenience of the caller. It is not
+ // expected to be large, since it is kept in memory.
+ Metadata string `json:"metadata,omitempty"`
+
+ // BigDataNames is a list of names of data items that we keep for the
+ // convenience of the caller. They can be large, and are only in
+ // memory when being read from or written to disk.
+ BigDataNames []string `json:"big-data-names,omitempty"`
+
+ // BigDataSizes maps the names in BigDataNames to the sizes of the data
+ // that has been stored, if they're known.
+ BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
+
+ // BigDataDigests maps the names in BigDataNames to the digests of the
+ // data that has been stored, if they're known.
+ BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
+
+ // Created is the datestamp for when this container was created. Older
+ // versions of the library did not track this information, so callers
+ // will likely want to use the IsZero() method to verify that a value
+ // is set before using it.
+ Created time.Time `json:"created,omitempty"`
+
+ Flags map[string]interface{} `json:"flags,omitempty"`
+}
+
+// ContainerStore provides bookkeeping for information about Containers.
+type ContainerStore interface {
+ FileBasedStore
+ MetadataStore
+ BigDataStore
+ FlaggableStore
+
+ // Create creates a container that has a specified ID (or generates a
+ // random one if an empty value is supplied) and optional names,
+ // based on the specified image, using the specified layer as its
+ // read-write layer.
+ Create(id string, names []string, image, layer, metadata string) (*Container, error)
+
+ // SetNames updates the list of names associated with the container
+ // with the specified ID.
+ SetNames(id string, names []string) error
+
+ // Get retrieves information about a container given an ID or name.
+ Get(id string) (*Container, error)
+
+ // Exists checks if there is a container with the given ID or name.
+ Exists(id string) bool
+
+ // Delete removes the record of the container.
+ Delete(id string) error
+
+ // Wipe removes records of all containers.
+ Wipe() error
+
+ // Lookup attempts to translate a name to an ID. Most methods do this
+ // implicitly.
+ Lookup(name string) (string, error)
+
+ // Containers returns a slice enumerating the known containers.
+ Containers() ([]Container, error)
+}
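
A sketch of driving this interface end to end; since store construction is internal to the package, a ready ContainerStore is assumed, and the image and layer IDs are placeholders:

    import (
        "fmt"

        "github.com/containers/storage"
    )

    func containerLifecycle(store storage.ContainerStore) error {
        c, err := store.Create("", []string{"web"}, "image-id", "layer-id", "some metadata")
        if err != nil {
            return err
        }
        fmt.Println("created container", c.ID) // a random ID, since "" was passed
        if err := store.SetNames(c.ID, []string{"web", "web-old"}); err != nil {
            return err
        }
        id, err := store.Lookup("web") // most methods resolve names to IDs implicitly anyway
        if err != nil {
            return err
        }
        return store.Delete(id)
    }
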
+
+type containerStore struct {
+ lockfile Locker
+ dir string
+ containers []*Container
+ idindex *truncindex.TruncIndex
+ byid map[string]*Container
+ bylayer map[string]*Container
+ byname map[string]*Container
+}
+
+func (r *containerStore) Containers() ([]Container, error) {
+ containers := make([]Container, len(r.containers))
+ for i := range r.containers {
+ containers[i] = *(r.containers[i])
+ }
+ return containers, nil
+}
+
+func (r *containerStore) containerspath() string {
+ return filepath.Join(r.dir, "containers.json")
+}
+
+func (r *containerStore) datadir(id string) string {
+ return filepath.Join(r.dir, id)
+}
+
+func (r *containerStore) datapath(id, key string) string {
+ return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
+}
+
+func (r *containerStore) Load() error {
+ needSave := false
+ rpath := r.containerspath()
+ data, err := ioutil.ReadFile(rpath)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ containers := []*Container{}
+ layers := make(map[string]*Container)
+ idlist := []string{}
+ ids := make(map[string]*Container)
+ names := make(map[string]*Container)
+ if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
+ idlist = make([]string, 0, len(containers))
+ for n, container := range containers {
+ idlist = append(idlist, container.ID)
+ ids[container.ID] = containers[n]
+ layers[container.LayerID] = containers[n]
+ for _, name := range container.Names {
+ if conflict, ok := names[name]; ok {
+ r.removeName(conflict, name)
+ needSave = true
+ }
+ names[name] = containers[n]
+ }
+ }
+ }
+ r.containers = containers
+ r.idindex = truncindex.NewTruncIndex(idlist)
+ r.byid = ids
+ r.bylayer = layers
+ r.byname = names
+ if needSave {
+ return r.Save()
+ }
+ return nil
+}
+
+func (r *containerStore) Save() error {
+ rpath := r.containerspath()
+ if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+ return err
+ }
+ jdata, err := json.Marshal(&r.containers)
+ if err != nil {
+ return err
+ }
+ defer r.Touch()
+ return ioutils.AtomicWriteFile(rpath, jdata, 0600)
+}
+
+func newContainerStore(dir string) (ContainerStore, error) {
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return nil, err
+ }
+ lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock"))
+ if err != nil {
+ return nil, err
+ }
+ lockfile.Lock()
+ defer lockfile.Unlock()
+ cstore := containerStore{
+ lockfile: lockfile,
+ dir: dir,
+ containers: []*Container{},
+ byid: make(map[string]*Container),
+ bylayer: make(map[string]*Container),
+ byname: make(map[string]*Container),
+ }
+ if err := cstore.Load(); err != nil {
+ return nil, err
+ }
+ return &cstore, nil
+}
+
+func (r *containerStore) lookup(id string) (*Container, bool) {
+ if container, ok := r.byid[id]; ok {
+ return container, ok
+ } else if container, ok := r.byname[id]; ok {
+ return container, ok
+ } else if container, ok := r.bylayer[id]; ok {
+ return container, ok
+ } else if longid, err := r.idindex.Get(id); err == nil {
+ if container, ok := r.byid[longid]; ok {
+ return container, ok
+ }
+ }
+ return nil, false
+}
+
+func (r *containerStore) ClearFlag(id string, flag string) error {
+ container, ok := r.lookup(id)
+ if !ok {
+ return ErrContainerUnknown
+ }
+ delete(container.Flags, flag)
+ return r.Save()
+}
+
+func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
+ container, ok := r.lookup(id)
+ if !ok {
+ return ErrContainerUnknown
+ }
+ if container.Flags == nil {
+ container.Flags = make(map[string]interface{})
+ }
+ container.Flags[flag] = value
+ return r.Save()
+}
+
+func (r *containerStore) Create(id string, names []string, image, layer, metadata string) (container *Container, err error) {
+ if id == "" {
+ id = stringid.GenerateRandomID()
+ _, idInUse := r.byid[id]
+ for idInUse {
+ id = stringid.GenerateRandomID()
+ _, idInUse = r.byid[id]
+ }
+ }
+ if _, idInUse := r.byid[id]; idInUse {
+ return nil, ErrDuplicateID
+ }
+ names = dedupeNames(names)
+ for _, name := range names {
+ if _, nameInUse := r.byname[name]; nameInUse {
+ return nil, ErrDuplicateName
+ }
+ }
+ if err == nil {
+ container = &Container{
+ ID: id,
+ Names: names,
+ ImageID: image,
+ LayerID: layer,
+ Metadata: metadata,
+ BigDataNames: []string{},
+ BigDataSizes: make(map[string]int64),
+ BigDataDigests: make(map[string]digest.Digest),
+ Created: time.Now().UTC(),
+ Flags: make(map[string]interface{}),
+ }
+ r.containers = append(r.containers, container)
+ r.byid[id] = container
+ r.idindex.Add(id)
+ r.bylayer[layer] = container
+ for _, name := range names {
+ r.byname[name] = container
+ }
+ err = r.Save()
+ }
+ return container, err
+}
+
+func (r *containerStore) Metadata(id string) (string, error) {
+ if container, ok := r.lookup(id); ok {
+ return container.Metadata, nil
+ }
+ return "", ErrContainerUnknown
+}
+
+func (r *containerStore) SetMetadata(id, metadata string) error {
+ if container, ok := r.lookup(id); ok {
+ container.Metadata = metadata
+ return r.Save()
+ }
+ return ErrContainerUnknown
+}
+
+func (r *containerStore) removeName(container *Container, name string) {
+ container.Names = stringSliceWithoutValue(container.Names, name)
+}
+
+func (r *containerStore) SetNames(id string, names []string) error {
+ names = dedupeNames(names)
+ if container, ok := r.lookup(id); ok {
+ for _, name := range container.Names {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherContainer, ok := r.byname[name]; ok {
+ r.removeName(otherContainer, name)
+ }
+ r.byname[name] = container
+ }
+ container.Names = names
+ return r.Save()
+ }
+ return ErrContainerUnknown
+}
+
+func (r *containerStore) Delete(id string) error {
+ container, ok := r.lookup(id)
+ if !ok {
+ return ErrContainerUnknown
+ }
+ id = container.ID
+ toDeleteIndex := -1
+ for i, candidate := range r.containers {
+ if candidate.ID == id {
+ toDeleteIndex = i
+ break
+ }
+ }
+ delete(r.byid, id)
+ r.idindex.Delete(id)
+ delete(r.bylayer, container.LayerID)
+ for _, name := range container.Names {
+ delete(r.byname, name)
+ }
+ if toDeleteIndex != -1 {
+ // delete the container at toDeleteIndex
+ if toDeleteIndex == len(r.containers)-1 {
+ r.containers = r.containers[:len(r.containers)-1]
+ } else {
+ r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
+ }
+ }
+ if err := r.Save(); err != nil {
+ return err
+ }
+ if err := os.RemoveAll(r.datadir(id)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *containerStore) Get(id string) (*Container, error) {
+ if container, ok := r.lookup(id); ok {
+ return container, nil
+ }
+ return nil, ErrContainerUnknown
+}
+
+func (r *containerStore) Lookup(name string) (id string, err error) {
+ if container, ok := r.lookup(name); ok {
+ return container.ID, nil
+ }
+ return "", ErrContainerUnknown
+}
+
+func (r *containerStore) Exists(id string) bool {
+ _, ok := r.lookup(id)
+ return ok
+}
+
+func (r *containerStore) BigData(id, key string) ([]byte, error) {
+ if key == "" {
+ return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
+ }
+ c, ok := r.lookup(id)
+ if !ok {
+ return nil, ErrContainerUnknown
+ }
+ return ioutil.ReadFile(r.datapath(c.ID, key))
+}
+
+func (r *containerStore) BigDataSize(id, key string) (int64, error) {
+ if key == "" {
+ return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
+ }
+ c, ok := r.lookup(id)
+ if !ok {
+ return -1, ErrContainerUnknown
+ }
+ if c.BigDataSizes == nil {
+ c.BigDataSizes = make(map[string]int64)
+ }
+ if size, ok := c.BigDataSizes[key]; ok {
+ return size, nil
+ }
+ if data, err := r.BigData(id, key); err == nil && data != nil {
+ if r.SetBigData(id, key, data) == nil {
+ c, ok := r.lookup(id)
+ if !ok {
+ return -1, ErrContainerUnknown
+ }
+ if size, ok := c.BigDataSizes[key]; ok {
+ return size, nil
+ }
+ }
+ }
+ return -1, ErrSizeUnknown
+}
+
+func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
+ if key == "" {
+ return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
+ }
+ c, ok := r.lookup(id)
+ if !ok {
+ return "", ErrContainerUnknown
+ }
+ if c.BigDataDigests == nil {
+ c.BigDataDigests = make(map[string]digest.Digest)
+ }
+ if d, ok := c.BigDataDigests[key]; ok {
+ return d, nil
+ }
+ if data, err := r.BigData(id, key); err == nil && data != nil {
+ if r.SetBigData(id, key, data) == nil {
+ c, ok := r.lookup(id)
+ if !ok {
+ return "", ErrContainerUnknown
+ }
+ if d, ok := c.BigDataDigests[key]; ok {
+ return d, nil
+ }
+ }
+ }
+ return "", ErrDigestUnknown
+}
+
+func (r *containerStore) BigDataNames(id string) ([]string, error) {
+ c, ok := r.lookup(id)
+ if !ok {
+ return nil, ErrContainerUnknown
+ }
+ return c.BigDataNames, nil
+}
+
+func (r *containerStore) SetBigData(id, key string, data []byte) error {
+ if key == "" {
+ return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
+ }
+ c, ok := r.lookup(id)
+ if !ok {
+ return ErrContainerUnknown
+ }
+ if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil {
+ return err
+ }
+ err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
+ if err == nil {
+ save := false
+ if c.BigDataSizes == nil {
+ c.BigDataSizes = make(map[string]int64)
+ }
+ oldSize, sizeOk := c.BigDataSizes[key]
+ c.BigDataSizes[key] = int64(len(data))
+ if c.BigDataDigests == nil {
+ c.BigDataDigests = make(map[string]digest.Digest)
+ }
+ oldDigest, digestOk := c.BigDataDigests[key]
+ newDigest := digest.Canonical.FromBytes(data)
+ c.BigDataDigests[key] = newDigest
+ if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
+ save = true
+ }
+ addName := true
+ for _, name := range c.BigDataNames {
+ if name == key {
+ addName = false
+ break
+ }
+ }
+ if addName {
+ c.BigDataNames = append(c.BigDataNames, key)
+ save = true
+ }
+ if save {
+ err = r.Save()
+ }
+ }
+ return err
+}
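
A sketch of the big-data round trip through the methods above; id is assumed to name an existing container:

    import (
        "fmt"

        "github.com/containers/storage"
    )

    func bigDataRoundTrip(store storage.ContainerStore, id string) error {
        if err := store.SetBigData(id, "config", []byte(`{"example":"payload"}`)); err != nil {
            return err
        }
        data, err := store.BigData(id, "config") // read back from its own file under the container's data directory
        if err != nil {
            return err
        }
        size, err := store.BigDataSize(id, "config") // served from the cached BigDataSizes map
        if err != nil {
            return err
        }
        dgst, err := store.BigDataDigest(id, "config") // cached alongside the size
        if err != nil {
            return err
        }
        fmt.Printf("config: read %d of %d bytes, digest %s\n", len(data), size, dgst)
        return nil
    }
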
+
+func (r *containerStore) Wipe() error {
+ ids := make([]string, 0, len(r.byid))
+ for id := range r.byid {
+ ids = append(ids, id)
+ }
+ for _, id := range ids {
+ if err := r.Delete(id); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *containerStore) Lock() {
+ r.lockfile.Lock()
+}
+
+func (r *containerStore) Unlock() {
+ r.lockfile.Unlock()
+}
+
+func (r *containerStore) Touch() error {
+ return r.lockfile.Touch()
+}
+
+func (r *containerStore) Modified() (bool, error) {
+ return r.lockfile.Modified()
+}
+
+func (r *containerStore) IsReadWrite() bool {
+ return r.lockfile.IsReadWrite()
+}
+
+func (r *containerStore) TouchedSince(when time.Time) bool {
+ return r.lockfile.TouchedSince(when)
+}
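
The lockfile plumbing above is what lets multiple processes share one store; a sketch of the expected protocol around any batch of operations, assuming the embedded FileBasedStore interface exposes these methods (they are all implemented in this file):

    import "github.com/containers/storage"

    func withContainerStore(store storage.ContainerStore, fn func(storage.ContainerStore) error) error {
        store.Lock()
        defer store.Unlock()
        if modified, err := store.Modified(); err == nil && modified {
            if err := store.Load(); err != nil { // re-read containers.json written by another process
                return err
            }
        }
        return fn(store)
    }
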
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
new file mode 100644
index 000000000..952619806
--- /dev/null
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -0,0 +1,1194 @@
+// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
+// source: containers.go
+
+package storage
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/opencontainers/go-digest"
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+)
+
+// MarshalJSON marshal bytes to json - template
+func (j *Container) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{ "id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteByte(',')
+ if len(j.Names) != 0 {
+ buf.WriteString(`"names":`)
+ if j.Names != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Names {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ buf.WriteString(`"image":`)
+ fflib.WriteJsonString(buf, string(j.ImageID))
+ buf.WriteString(`,"layer":`)
+ fflib.WriteJsonString(buf, string(j.LayerID))
+ buf.WriteByte(',')
+ if len(j.Metadata) != 0 {
+ buf.WriteString(`"metadata":`)
+ fflib.WriteJsonString(buf, string(j.Metadata))
+ buf.WriteByte(',')
+ }
+ if len(j.BigDataNames) != 0 {
+ buf.WriteString(`"big-data-names":`)
+ if j.BigDataNames != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.BigDataNames {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.BigDataSizes) != 0 {
+ if j.BigDataSizes == nil {
+ buf.WriteString(`"big-data-sizes":null`)
+ } else {
+ buf.WriteString(`"big-data-sizes":{ `)
+ for key, value := range j.BigDataSizes {
+ fflib.WriteJsonString(buf, key)
+ buf.WriteString(`:`)
+ fflib.FormatBits2(buf, uint64(value), 10, value < 0)
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.BigDataDigests) != 0 {
+ if j.BigDataDigests == nil {
+ buf.WriteString(`"big-data-digests":null`)
+ } else {
+ buf.WriteString(`"big-data-digests":{ `)
+ for key, value := range j.BigDataDigests {
+ fflib.WriteJsonString(buf, key)
+ buf.WriteString(`:`)
+ fflib.WriteJsonString(buf, string(value))
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ }
+ buf.WriteByte(',')
+ }
+ if true {
+ buf.WriteString(`"created":`)
+
+ {
+
+ obj, err = j.Created.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ buf.Write(obj)
+
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Flags) != 0 {
+ buf.WriteString(`"flags":`)
+ /* Falling back. type=map[string]interface {} kind=map */
+ err = buf.Encode(j.Flags)
+ if err != nil {
+ return err
+ }
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtContainerbase = iota
+ ffjtContainernosuchkey
+
+ ffjtContainerID
+
+ ffjtContainerNames
+
+ ffjtContainerImageID
+
+ ffjtContainerLayerID
+
+ ffjtContainerMetadata
+
+ ffjtContainerBigDataNames
+
+ ffjtContainerBigDataSizes
+
+ ffjtContainerBigDataDigests
+
+ ffjtContainerCreated
+
+ ffjtContainerFlags
+)
+
+var ffjKeyContainerID = []byte("id")
+
+var ffjKeyContainerNames = []byte("names")
+
+var ffjKeyContainerImageID = []byte("image")
+
+var ffjKeyContainerLayerID = []byte("layer")
+
+var ffjKeyContainerMetadata = []byte("metadata")
+
+var ffjKeyContainerBigDataNames = []byte("big-data-names")
+
+var ffjKeyContainerBigDataSizes = []byte("big-data-sizes")
+
+var ffjKeyContainerBigDataDigests = []byte("big-data-digests")
+
+var ffjKeyContainerCreated = []byte("created")
+
+var ffjKeyContainerFlags = []byte("flags")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *Container) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
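
Because *Container implements json.Marshaler and json.Unmarshaler, encoding/json dispatches to these generated fast paths automatically; a small round-trip sketch:

    import (
        "encoding/json"
        "fmt"

        "github.com/containers/storage"
    )

    func roundTrip() error {
        in := storage.Container{ID: "abc123", Names: []string{"web"}, ImageID: "img", LayerID: "layer"}
        data, err := json.Marshal(&in) // dispatches to the generated MarshalJSON above
        if err != nil {
            return err
        }
        var out storage.Container
        if err := json.Unmarshal(data, &out); err != nil { // dispatches to the generated UnmarshalJSON
            return err
        }
        fmt.Println(out.ID == in.ID) // true
        return nil
    }
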
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *Container) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtContainerbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtContainernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'b':
+
+ if bytes.Equal(ffjKeyContainerBigDataNames, kn) {
+ currentKey = ffjtContainerBigDataNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyContainerBigDataSizes, kn) {
+ currentKey = ffjtContainerBigDataSizes
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyContainerBigDataDigests, kn) {
+ currentKey = ffjtContainerBigDataDigests
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'c':
+
+ if bytes.Equal(ffjKeyContainerCreated, kn) {
+ currentKey = ffjtContainerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffjKeyContainerFlags, kn) {
+ currentKey = ffjtContainerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyContainerID, kn) {
+ currentKey = ffjtContainerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyContainerImageID, kn) {
+ currentKey = ffjtContainerImageID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'l':
+
+ if bytes.Equal(ffjKeyContainerLayerID, kn) {
+ currentKey = ffjtContainerLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffjKeyContainerMetadata, kn) {
+ currentKey = ffjtContainerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffjKeyContainerNames, kn) {
+ currentKey = ffjtContainerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyContainerFlags, kn) {
+ currentKey = ffjtContainerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyContainerCreated, kn) {
+ currentKey = ffjtContainerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyContainerBigDataDigests, kn) {
+ currentKey = ffjtContainerBigDataDigests
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyContainerBigDataSizes, kn) {
+ currentKey = ffjtContainerBigDataSizes
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyContainerBigDataNames, kn) {
+ currentKey = ffjtContainerBigDataNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyContainerMetadata, kn) {
+ currentKey = ffjtContainerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyContainerLayerID, kn) {
+ currentKey = ffjtContainerLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyContainerImageID, kn) {
+ currentKey = ffjtContainerImageID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyContainerNames, kn) {
+ currentKey = ffjtContainerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyContainerID, kn) {
+ currentKey = ffjtContainerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtContainernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtContainerID:
+ goto handle_ID
+
+ case ffjtContainerNames:
+ goto handle_Names
+
+ case ffjtContainerImageID:
+ goto handle_ImageID
+
+ case ffjtContainerLayerID:
+ goto handle_LayerID
+
+ case ffjtContainerMetadata:
+ goto handle_Metadata
+
+ case ffjtContainerBigDataNames:
+ goto handle_BigDataNames
+
+ case ffjtContainerBigDataSizes:
+ goto handle_BigDataSizes
+
+ case ffjtContainerBigDataDigests:
+ goto handle_BigDataDigests
+
+ case ffjtContainerCreated:
+ goto handle_Created
+
+ case ffjtContainerFlags:
+ goto handle_Flags
+
+ case ffjtContainernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Names:
+
+ /* handler: j.Names type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Names = nil
+ } else {
+
+ j.Names = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJNames = string(string(outBuf))
+
+ }
+ }
+
+ j.Names = append(j.Names, tmpJNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ImageID:
+
+ /* handler: j.ImageID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ImageID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_LayerID:
+
+ /* handler: j.LayerID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.LayerID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Metadata:
+
+ /* handler: j.Metadata type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Metadata = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_BigDataNames:
+
+ /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.BigDataNames = nil
+ } else {
+
+ j.BigDataNames = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJBigDataNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJBigDataNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJBigDataNames = string(string(outBuf))
+
+ }
+ }
+
+ j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_BigDataSizes:
+
+ /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.BigDataSizes = nil
+ } else {
+
+ j.BigDataSizes = make(map[string]int64, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJBigDataSizes int64
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJBigDataSizes = int64(tval)
+
+ }
+ }
+
+ j.BigDataSizes[k] = tmpJBigDataSizes
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_BigDataDigests:
+
+ /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.BigDataDigests = nil
+ } else {
+
+ j.BigDataDigests = make(map[string]digest.Digest, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJBigDataDigests digest.Digest
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJBigDataDigests = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ j.BigDataDigests[k] = tmpJBigDataDigests
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Created:
+
+ /* handler: j.Created type=time.Time kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = j.Created.UnmarshalJSON(tbuf)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Flags:
+
+ /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Flags = nil
+ } else {
+
+ j.Flags = make(map[string]interface{}, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJFlags interface{}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/
+
+ {
+ /* Falling back. type=interface {} kind=interface */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJFlags)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.Flags[k] = tmpJFlags
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshals the struct to JSON bytes - template
+func (j *containerStore) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals the struct into the given buffer - template
+func (j *containerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtcontainerStorebase = iota
+ ffjtcontainerStorenosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *containerStore) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *containerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtcontainerStorebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtcontainerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtcontainerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtcontainerStorenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
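
For orientation, a minimal sketch of how the generated methods above are
driven; it is not part of this diff, and the exported Container type, its
JSON keys ("id", "names"), and the import path are assumptions based on the
keys the parser handles:

	package main

	import (
		"fmt"
		"log"

		"github.com/containers/storage" // assumed home of the Container type
	)

	func main() {
		var c storage.Container
		// UnmarshalJSON is the generated fast path shown above.
		if err := c.UnmarshalJSON([]byte(`{"id":"abc","names":["web"]}`)); err != nil {
			log.Fatal(err)
		}
		out, err := c.MarshalJSON() // delegates to MarshalJSONBuf
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(out))
	}
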
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
new file mode 100644
index 000000000..aa0da7ad0
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -0,0 +1,691 @@
+// +build linux
+
+/*
+
+aufs driver directory structure
+
+ .
+ ├── layers // Metadata of layers
+ │ ├── 1
+ │ ├── 2
+ │ └── 3
+ ├── diff // Content of the layer
+ │ ├── 1 // Contains layers that need to be mounted for the id
+ │ ├── 2
+ │ └── 3
+ └── mnt // Mount points for the rw layers to be mounted
+ ├── 1
+ ├── 2
+ └── 3
+
+*/
+
+package aufs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/locker"
+ mountpk "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // ErrAufsNotSupported is returned if aufs is not supported by the host.
+ ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
+	// ErrAufsNested means aufs cannot be used because we are in a user namespace
+ ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
+ backingFs = "<unknown>"
+
+ enableDirpermLock sync.Once
+ enableDirperm bool
+)
+
+func init() {
+ graphdriver.Register("aufs", Init)
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+ sync.Mutex
+ root string
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+ pathCacheLock sync.Mutex
+ pathCache map[string]string
+ naiveDiff graphdriver.DiffDriver
+ locker *locker.Locker
+}
+
+// Init returns a new AUFS driver.
+// An error is returned if AUFS is not supported.
+func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+ // Try to load the aufs kernel module
+	if err := supportsAufs(); err != nil {
+		return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs")
+	}
+
+ fsMagic, err := graphdriver.GetFSMagic(root)
+ if err != nil {
+ return nil, err
+ }
+ if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+ backingFs = fsName
+ }
+
+ switch fsMagic {
+ case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
+ logrus.Errorf("AUFS is not supported over %s", backingFs)
+ return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs)
+ }
+
+ paths := []string{
+ "mnt",
+ "diff",
+ "layers",
+ }
+
+ a := &Driver{
+ root: root,
+ uidMaps: uidMaps,
+ gidMaps: gidMaps,
+ pathCache: make(map[string]string),
+ ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
+ locker: locker.New(),
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+	// Create the root aufs driver dir and return
+	// if it already exists.
+	// If not, populate the dir structure.
+ if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil {
+ if os.IsExist(err) {
+ return a, nil
+ }
+ return nil, err
+ }
+
+ if err := mountpk.MakePrivate(root); err != nil {
+ return nil, err
+ }
+
+ // Populate the dir structure
+ for _, p := range paths {
+ if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil {
+ return nil, err
+ }
+ }
+ logger := logrus.WithFields(logrus.Fields{
+ "module": "graphdriver",
+ "driver": "aufs",
+ })
+
+ for _, path := range []string{"mnt", "diff"} {
+ p := filepath.Join(root, path)
+ entries, err := ioutil.ReadDir(p)
+ if err != nil {
+ logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
+ continue
+ }
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ continue
+ }
+ if strings.HasSuffix(entry.Name(), "-removing") {
+ logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir")
+ if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil {
+ logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir")
+ }
+ }
+ }
+ }
+
+ a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps)
+ return a, nil
+}
+
+// Return a nil error if the kernel supports aufs.
+// We cannot rely on modprobe alone because inside dind
+// modprobe fails to run.
+func supportsAufs() error {
+	// Try to modprobe aufs first (best effort) before checking
+	// /proc/filesystems for aufs support.
+ exec.Command("modprobe", "aufs").Run()
+
+ if rsystem.RunningInUserNS() {
+ return ErrAufsNested
+ }
+
+ f, err := os.Open("/proc/filesystems")
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if strings.Contains(s.Text(), "aufs") {
+ return nil
+ }
+ }
+ return ErrAufsNotSupported
+}
+
+func (a *Driver) rootPath() string {
+ return a.root
+}
+
+func (*Driver) String() string {
+ return "aufs"
+}
+
+// Status returns current information about the filesystem such as root directory, number of directories mounted, etc.
+func (a *Driver) Status() [][2]string {
+ ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
+ return [][2]string{
+ {"Root Dir", a.rootPath()},
+ {"Backing Filesystem", backingFs},
+ {"Dirs", fmt.Sprintf("%d", len(ids))},
+ {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())},
+ }
+}
+
+// Metadata not implemented
+func (a *Driver) Metadata(id string) (map[string]string, error) {
+ return nil, nil
+}
+
+// Exists returns true if the given id is registered with
+// this driver
+func (a *Driver) Exists(id string) bool {
+ if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
+ return false
+ }
+ return true
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (a *Driver) AdditionalImageStores() []string {
+ return nil
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ return a.Create(id, parent, opts)
+}
+
+// Create three folders for each id:
+// mnt, layers, and diff
+func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+
+ if opts != nil && len(opts.StorageOpt) != 0 {
+ return fmt.Errorf("--storage-opt is not supported for aufs")
+ }
+
+ if err := a.createDirsFor(id); err != nil {
+ return err
+ }
+ // Write the layers metadata
+ f, err := os.Create(path.Join(a.rootPath(), "layers", id))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if parent != "" {
+ ids, err := getParentIDs(a.rootPath(), parent)
+ if err != nil {
+ return err
+ }
+
+ if _, err := fmt.Fprintln(f, parent); err != nil {
+ return err
+ }
+ for _, i := range ids {
+ if _, err := fmt.Fprintln(f, i); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// createDirsFor creates two directories for the given id:
+// mnt and diff
+func (a *Driver) createDirsFor(id string) error {
+ paths := []string{
+ "mnt",
+ "diff",
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps)
+ if err != nil {
+ return err
+ }
+	// Directory permission is 0755.
+	// The paths of the directories are <aufs_root_path>/mnt/<image_id>
+	// and <aufs_root_path>/diff/<image_id>
+ for _, p := range paths {
+ if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Remove will unmount and remove the given id.
+func (a *Driver) Remove(id string) error {
+ a.locker.Lock(id)
+ defer a.locker.Unlock(id)
+ a.pathCacheLock.Lock()
+ mountpoint, exists := a.pathCache[id]
+ a.pathCacheLock.Unlock()
+ if !exists {
+ mountpoint = a.getMountpoint(id)
+ }
+
+ logger := logrus.WithFields(logrus.Fields{
+ "module": "graphdriver",
+ "driver": "aufs",
+ "layer": id,
+ })
+
+ var retries int
+ for {
+ mounted, err := a.mounted(mountpoint)
+ if err != nil {
+ if os.IsNotExist(err) {
+ break
+ }
+ return err
+ }
+ if !mounted {
+ break
+ }
+
+ err = a.unmount(mountpoint)
+ if err == nil {
+ break
+ }
+
+ if err != unix.EBUSY {
+ return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+ }
+ if retries >= 5 {
+ return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+ }
+ // If unmount returns EBUSY, it could be a transient error. Sleep and retry.
+ retries++
+ logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // Remove the layers file for the id
+ if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error removing layers dir for %s", id)
+ }
+
+ if err := atomicRemove(a.getDiffPath(id)); err != nil {
+ return errors.Wrapf(err, "could not remove diff path for id %s", id)
+ }
+
+	// Atomically remove each directory in turn by first moving it out of the
+	// way (so that the container runtime doesn't find it anymore) before
+	// removing the whole tree.
+ if err := atomicRemove(mountpoint); err != nil {
+ if errors.Cause(err) == unix.EBUSY {
+ logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
+ }
+ return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
+ }
+
+ a.pathCacheLock.Lock()
+ delete(a.pathCache, id)
+ a.pathCacheLock.Unlock()
+ return nil
+}
+
+func atomicRemove(source string) error {
+ target := source + "-removing"
+
+ err := os.Rename(source, target)
+ switch {
+ case err == nil, os.IsNotExist(err):
+ case os.IsExist(err):
+ // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
+ if _, e := os.Stat(source); !os.IsNotExist(e) {
+ return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
+ }
+ default:
+ return errors.Wrapf(err, "error preparing atomic delete")
+ }
+
+ return system.EnsureRemoveAll(target)
+}
+
+// Get returns the rootfs path for the id.
+// This will mount the dir at its given path
+func (a *Driver) Get(id, mountLabel string) (string, error) {
+ a.locker.Lock(id)
+ defer a.locker.Unlock(id)
+ parents, err := a.getParentLayerPaths(id)
+ if err != nil && !os.IsNotExist(err) {
+ return "", err
+ }
+
+ a.pathCacheLock.Lock()
+ m, exists := a.pathCache[id]
+ a.pathCacheLock.Unlock()
+
+ if !exists {
+ m = a.getDiffPath(id)
+ if len(parents) > 0 {
+ m = a.getMountpoint(id)
+ }
+ }
+ if count := a.ctr.Increment(m); count > 1 {
+ return m, nil
+ }
+
+	// If a dir does not have a parent (no layers), do not try to mount;
+	// just return the diff path to the data.
+ if len(parents) > 0 {
+ if err := a.mount(id, m, mountLabel, parents); err != nil {
+ return "", err
+ }
+ }
+
+ a.pathCacheLock.Lock()
+ a.pathCache[id] = m
+ a.pathCacheLock.Unlock()
+ return m, nil
+}
+
+// Put unmounts and updates list of active mounts.
+func (a *Driver) Put(id string) error {
+ a.locker.Lock(id)
+ defer a.locker.Unlock(id)
+ a.pathCacheLock.Lock()
+ m, exists := a.pathCache[id]
+ if !exists {
+ m = a.getMountpoint(id)
+ a.pathCache[id] = m
+ }
+ a.pathCacheLock.Unlock()
+ if count := a.ctr.Decrement(m); count > 0 {
+ return nil
+ }
+
+ err := a.unmount(m)
+ if err != nil {
+ logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
+ }
+ return err
+}
+
+// isParent reports whether the passed-in parent is the direct parent of the passed-in layer
+func (a *Driver) isParent(id, parent string) bool {
+ parents, _ := getParentIDs(a.rootPath(), id)
+ if parent == "" && len(parents) > 0 {
+ return false
+ }
+ return !(len(parents) > 0 && parent != parents[0])
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+ if !a.isParent(id, parent) {
+ return a.naiveDiff.Diff(id, parent)
+ }
+
+ // AUFS doesn't need the parent layer to produce a diff.
+ return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
+ UIDMaps: a.uidMaps,
+ GIDMaps: a.gidMaps,
+ })
+}
+
+type fileGetNilCloser struct {
+ storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+ return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ p := path.Join(a.rootPath(), "diff", id)
+ return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
+func (a *Driver) applyDiff(id string, diff io.Reader) error {
+ return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+ UIDMaps: a.uidMaps,
+ GIDMaps: a.gidMaps,
+ })
+}
+
+// DiffSize calculates the changes between the specified id
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+ if !a.isParent(id, parent) {
+ return a.naiveDiff.DiffSize(id, parent)
+ }
+ // AUFS doesn't need the parent layer to calculate the diff size.
+ return directory.Size(path.Join(a.rootPath(), "diff", id))
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+ if !a.isParent(id, parent) {
+ return a.naiveDiff.ApplyDiff(id, parent, diff)
+ }
+
+ // AUFS doesn't need the parent id to apply the diff if it is the direct parent.
+ if err = a.applyDiff(id, diff); err != nil {
+ return
+ }
+
+ return a.DiffSize(id, parent)
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
+ if !a.isParent(id, parent) {
+ return a.naiveDiff.Changes(id, parent)
+ }
+
+ // AUFS doesn't have snapshots, so we need to get changes from all parent
+ // layers.
+ layers, err := a.getParentLayerPaths(id)
+ if err != nil {
+ return nil, err
+ }
+ return archive.Changes(layers, path.Join(a.rootPath(), "diff", id))
+}
+
+func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
+ parentIds, err := getParentIDs(a.rootPath(), id)
+ if err != nil {
+ return nil, err
+ }
+ layers := make([]string, len(parentIds))
+
+ // Get the diff paths for all the parent ids
+ for i, p := range parentIds {
+ layers[i] = path.Join(a.rootPath(), "diff", p)
+ }
+ return layers, nil
+}
+
+func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
+ a.Lock()
+ defer a.Unlock()
+
+	// If the id is already mounted, or we get an error, return
+ if mounted, err := a.mounted(target); err != nil || mounted {
+ return err
+ }
+
+ rw := a.getDiffPath(id)
+
+ if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
+ return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
+ }
+ return nil
+}
+
+func (a *Driver) unmount(mountPath string) error {
+ a.Lock()
+ defer a.Unlock()
+
+ if mounted, err := a.mounted(mountPath); err != nil || !mounted {
+ return err
+ }
+ if err := Unmount(mountPath); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (a *Driver) mounted(mountpoint string) (bool, error) {
+ return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
+}
+
+// Cleanup aufs and unmount all mountpoints
+func (a *Driver) Cleanup() error {
+ var dirs []string
+ if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() {
+ return nil
+ }
+ dirs = append(dirs, path)
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ for _, m := range dirs {
+ if err := a.unmount(m); err != nil {
+ logrus.Debugf("aufs error unmounting %s: %s", m, err)
+ }
+ }
+ return mountpk.Unmount(a.root)
+}
+
+func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
+ defer func() {
+ if err != nil {
+ Unmount(target)
+ }
+ }()
+
+	// Mount options are clipped to the page size (4096 bytes). If there are
+	// more layers than fit, the rest are remounted individually using append.
+
+ offset := 54
+ if useDirperm() {
+ offset += len(",dirperm1")
+ }
+ b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+ bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
+
+ index := 0
+ for ; index < len(ro); index++ {
+ layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+ if bp+len(layer) > len(b) {
+ break
+ }
+ bp += copy(b[bp:], layer)
+ }
+
+ opts := "dio,xino=/dev/shm/aufs.xino"
+ if useDirperm() {
+ opts += ",dirperm1"
+ }
+ data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
+ if err = mount("none", target, "aufs", 0, data); err != nil {
+ return
+ }
+
+ for ; index < len(ro); index++ {
+ layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+ data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+ if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+// useDirperm checks whether the dirperm1 mount option can be used with the
+// current version of aufs.
+func useDirperm() bool {
+ enableDirpermLock.Do(func() {
+ base, err := ioutil.TempDir("", "storage-aufs-base")
+ if err != nil {
+ logrus.Errorf("error checking dirperm1: %v", err)
+ return
+ }
+ defer os.RemoveAll(base)
+
+ union, err := ioutil.TempDir("", "storage-aufs-union")
+ if err != nil {
+ logrus.Errorf("error checking dirperm1: %v", err)
+ return
+ }
+ defer os.RemoveAll(union)
+
+ opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
+ if err := mount("none", union, "aufs", 0, opts); err != nil {
+ return
+ }
+ enableDirperm = true
+ if err := Unmount(union); err != nil {
+ logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
+ }
+ })
+ return enableDirperm
+}
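
For context, a runnable sketch of the branch string aufsMount assembles for
the first mount call; all paths are placeholders. Layers that do not fit in
the page-size budget are added afterwards with MS_REMOUNT and an "append"
directive, exactly as in the loop above:

	package main

	import "fmt"

	func main() {
		rw := "/var/lib/storage/aufs/diff/id" // writable branch (placeholder)
		ro := []string{ // read-only parent branches (placeholders)
			"/var/lib/storage/aufs/diff/parent1",
			"/var/lib/storage/aufs/diff/parent2",
		}
		data := fmt.Sprintf("br:%s=rw", rw)
		for _, layer := range ro {
			data += fmt.Sprintf(":%s=ro+wh", layer)
		}
		data += ",dio,xino=/dev/shm/aufs.xino" // plus ",dirperm1" when supported
		fmt.Println(data)
	}
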
diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
new file mode 100644
index 000000000..d2325fc46
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
@@ -0,0 +1,64 @@
+// +build linux
+
+package aufs
+
+import (
+ "bufio"
+ "io/ioutil"
+ "os"
+ "path"
+)
+
+// Return the ids of all layers by listing the non-directory entries under root
+func loadIds(root string) ([]string, error) {
+ dirs, err := ioutil.ReadDir(root)
+ if err != nil {
+ return nil, err
+ }
+ out := []string{}
+ for _, d := range dirs {
+ if !d.IsDir() {
+ out = append(out, d.Name())
+ }
+ }
+ return out, nil
+}
+
+// Read the layers file for the current id and return all the
+// layers it lists, one per line.
+//
+// If there are no lines in the file then the id has no parent
+// and an empty slice is returned.
+func getParentIDs(root, id string) ([]string, error) {
+ f, err := os.Open(path.Join(root, "layers", id))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ out := []string{}
+ s := bufio.NewScanner(f)
+
+ for s.Scan() {
+ if t := s.Text(); t != "" {
+ out = append(out, s.Text())
+ }
+ }
+ return out, s.Err()
+}
+
+func (a *Driver) getMountpoint(id string) string {
+ return path.Join(a.mntPath(), id)
+}
+
+func (a *Driver) mntPath() string {
+ return path.Join(a.rootPath(), "mnt")
+}
+
+func (a *Driver) getDiffPath(id string) string {
+ return path.Join(a.diffPath(), id)
+}
+
+func (a *Driver) diffPath() string {
+ return path.Join(a.rootPath(), "diff")
+}
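
To illustrate the file format getParentIDs consumes (IDs are placeholders):
Create writes the direct parent first, then the rest of the chain, one ID
per line. A minimal runnable sketch of the same scan logic:

	package main

	import (
		"bufio"
		"fmt"
		"strings"
	)

	func main() {
		// Contents of <root>/layers/<id> for a layer whose chain is p1 -> p2.
		file := "p1\np2\n"
		var out []string
		s := bufio.NewScanner(strings.NewReader(file))
		for s.Scan() {
			if t := s.Text(); t != "" {
				out = append(out, t)
			}
		}
		fmt.Println(out) // [p1 p2]
	}
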
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go
new file mode 100644
index 000000000..100e7537a
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go
@@ -0,0 +1,21 @@
+// +build linux
+
+package aufs
+
+import (
+ "os/exec"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// Unmount the target specified.
+func Unmount(target string) error {
+ if err := exec.Command("auplink", target, "flush").Run(); err != nil {
+ logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
+ }
+ if err := unix.Unmount(target, 0); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go
new file mode 100644
index 000000000..937104ba3
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go
@@ -0,0 +1,7 @@
+package aufs
+
+import "golang.org/x/sys/unix"
+
+func mount(source string, target string, fstype string, flags uintptr, data string) error {
+ return unix.Mount(source, target, fstype, flags, data)
+}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
new file mode 100644
index 000000000..d030b0663
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package aufs
+
+import "errors"
+
+// MsRemount is declared as a stub so non-linux builds compile; mounting is unsupported there.
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+ return errors.New("mount is not implemented on this platform")
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
new file mode 100644
index 000000000..abc856c83
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -0,0 +1,677 @@
+// +build linux
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+ snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/parsers"
+ "github.com/containers/storage/pkg/system"
+ "github.com/docker/go-units"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func init() {
+ graphdriver.Register("btrfs", Init)
+}
+
+type btrfsOptions struct {
+ minSpace uint64
+ size uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+ fsMagic, err := graphdriver.GetFSMagic(home)
+ if err != nil {
+ return nil, err
+ }
+
+ if fsMagic != graphdriver.FsMagicBtrfs {
+ return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+ if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+ return nil, err
+ }
+
+ if err := mount.MakePrivate(home); err != nil {
+ return nil, err
+ }
+
+ opt, userDiskQuota, err := parseOptions(options)
+ if err != nil {
+ return nil, err
+ }
+
+ driver := &Driver{
+ home: home,
+ uidMaps: uidMaps,
+ gidMaps: gidMaps,
+ options: opt,
+ }
+
+ if userDiskQuota {
+ if err := driver.subvolEnableQuota(); err != nil {
+ return nil, err
+ }
+ }
+
+ return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, bool, error) {
+ var options btrfsOptions
+ userDiskQuota := false
+ for _, option := range opt {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return options, userDiskQuota, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "btrfs.min_space":
+ minSpace, err := units.RAMInBytes(val)
+ if err != nil {
+ return options, userDiskQuota, err
+ }
+ userDiskQuota = true
+ options.minSpace = uint64(minSpace)
+ default:
+ return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+ }
+ }
+ return options, userDiskQuota, nil
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	// root of the filesystem
+ home string
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ options btrfsOptions
+ quotaEnabled bool
+ once sync.Once
+}
+
+// String prints the name of the driver (btrfs).
+func (d *Driver) String() string {
+ return "btrfs"
+}
+
+// Status returns current driver information in a two dimensional string array.
+// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
+// Version information can be used to check compatibility with your kernel.
+func (d *Driver) Status() [][2]string {
+ status := [][2]string{}
+ if bv := btrfsBuildVersion(); bv != "-" {
+ status = append(status, [2]string{"Build Version", bv})
+ }
+ if lv := btrfsLibVersion(); lv != -1 {
+ status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)})
+ }
+ return status
+}
+
+// Metadata returns empty metadata for this driver.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+ return nil, nil
+}
+
+// Cleanup unmounts the home directory.
+func (d *Driver) Cleanup() error {
+ if err := d.subvolDisableQuota(); err != nil {
+ return err
+ }
+
+ return mount.Unmount(d.home)
+}
+
+func free(p *C.char) {
+ C.free(unsafe.Pointer(p))
+}
+
+func openDir(path string) (*C.DIR, error) {
+ Cpath := C.CString(path)
+ defer free(Cpath)
+
+ dir := C.opendir(Cpath)
+ if dir == nil {
+ return nil, fmt.Errorf("Can't open dir")
+ }
+ return dir, nil
+}
+
+func closeDir(dir *C.DIR) {
+ if dir != nil {
+ C.closedir(dir)
+ }
+}
+
+func getDirFd(dir *C.DIR) uintptr {
+ return uintptr(C.dirfd(dir))
+}
+
+func subvolCreate(path, name string) error {
+ dir, err := openDir(path)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var args C.struct_btrfs_ioctl_vol_args
+ for i, c := range []byte(name) {
+ args.name[i] = C.char(c)
+ }
+
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
+ }
+ return nil
+}
+
+func subvolSnapshot(src, dest, name string) error {
+ srcDir, err := openDir(src)
+ if err != nil {
+ return err
+ }
+ defer closeDir(srcDir)
+
+ destDir, err := openDir(dest)
+ if err != nil {
+ return err
+ }
+ defer closeDir(destDir)
+
+ var args C.struct_btrfs_ioctl_vol_args_v2
+ args.fd = C.__s64(getDirFd(srcDir))
+
+ var cs = C.CString(name)
+ C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
+ C.free(unsafe.Pointer(cs))
+
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
+ }
+ return nil
+}
+
+func isSubvolume(p string) (bool, error) {
+ var bufStat unix.Stat_t
+ if err := unix.Lstat(p, &bufStat); err != nil {
+ return false, err
+ }
+
+ // return true if it is a btrfs subvolume
+ return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil
+}
+
+func subvolDelete(dirpath, name string, quotaEnabled bool) error {
+ dir, err := openDir(dirpath)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+ fullPath := path.Join(dirpath, name)
+
+ var args C.struct_btrfs_ioctl_vol_args
+
+ // walk the btrfs subvolumes
+ walkSubvolumes := func(p string, f os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) && p != fullPath {
+ // missing most likely because the path was a subvolume that got removed in the previous iteration
+ // since it's gone anyway, we don't care
+ return nil
+ }
+ return fmt.Errorf("error walking subvolumes: %v", err)
+ }
+		// we want to check children only, so skip the path itself;
+		// it will be removed after the filepath walk anyway
+ if f.IsDir() && p != fullPath {
+ sv, err := isSubvolume(p)
+ if err != nil {
+ return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
+ }
+ if sv {
+ if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil {
+ return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
+ }
+ }
+ }
+ return nil
+ }
+ if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil {
+ return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
+ }
+
+ if quotaEnabled {
+ if qgroupid, err := subvolLookupQgroup(fullPath); err == nil {
+ var args C.struct_btrfs_ioctl_qgroup_create_args
+ args.qgroupid = C.__u64(qgroupid)
+
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error())
+ }
+ } else {
+ logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error())
+ }
+ }
+
+ // all subvolumes have been removed
+ // now remove the one originally passed in
+ for i, c := range []byte(name) {
+ args.name[i] = C.char(c)
+ }
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
+ }
+ return nil
+}
+
+func (d *Driver) updateQuotaStatus() {
+ d.once.Do(func() {
+ if !d.quotaEnabled {
+ // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed
+ if err := subvolQgroupStatus(d.home); err != nil {
+ // quota is still not enabled
+ return
+ }
+ d.quotaEnabled = true
+ }
+ })
+}
+
+func (d *Driver) subvolEnableQuota() error {
+ d.updateQuotaStatus()
+
+ if d.quotaEnabled {
+ return nil
+ }
+
+ dir, err := openDir(d.home)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var args C.struct_btrfs_ioctl_quota_ctl_args
+ args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
+ }
+
+ d.quotaEnabled = true
+
+ return nil
+}
+
+func (d *Driver) subvolDisableQuota() error {
+ d.updateQuotaStatus()
+
+ if !d.quotaEnabled {
+ return nil
+ }
+
+ dir, err := openDir(d.home)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var args C.struct_btrfs_ioctl_quota_ctl_args
+ args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
+ }
+
+ d.quotaEnabled = false
+
+ return nil
+}
+
+func (d *Driver) subvolRescanQuota() error {
+ d.updateQuotaStatus()
+
+ if !d.quotaEnabled {
+ return nil
+ }
+
+ dir, err := openDir(d.home)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var args C.struct_btrfs_ioctl_quota_rescan_args
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
+ }
+
+ return nil
+}
+
+func subvolLimitQgroup(path string, size uint64) error {
+ dir, err := openDir(path)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var args C.struct_btrfs_ioctl_qgroup_limit_args
+ args.lim.max_referenced = C.__u64(size)
+ args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
+ }
+
+ return nil
+}
+
+// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
+// with search key of BTRFS_QGROUP_STATUS_KEY.
+// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY.
+// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035
+func subvolQgroupStatus(path string) error {
+ dir, err := openDir(path)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var args C.struct_btrfs_ioctl_search_args
+ args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID
+ args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY
+ args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY
+ args.key.max_objectid = C.__u64(math.MaxUint64)
+ args.key.max_offset = C.__u64(math.MaxUint64)
+ args.key.max_transid = C.__u64(math.MaxUint64)
+ args.key.nr_items = 4096
+
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error())
+ }
+ sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf))
+ if sh._type != C.BTRFS_QGROUP_STATUS_KEY {
+ return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type)
+ }
+ return nil
+}
+
+func subvolLookupQgroup(path string) (uint64, error) {
+ dir, err := openDir(path)
+ if err != nil {
+ return 0, err
+ }
+ defer closeDir(dir)
+
+ var args C.struct_btrfs_ioctl_ino_lookup_args
+ args.objectid = C.BTRFS_FIRST_FREE_OBJECTID
+
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
+ uintptr(unsafe.Pointer(&args)))
+ if errno != 0 {
+ return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error())
+ }
+ if args.treeid == 0 {
+ return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir)
+ }
+
+ return uint64(args.treeid), nil
+}
+
+func (d *Driver) subvolumesDir() string {
+ return path.Join(d.home, "subvolumes")
+}
+
+func (d *Driver) subvolumesDirID(id string) string {
+ return path.Join(d.subvolumesDir(), id)
+}
+
+func (d *Driver) quotasDir() string {
+ return path.Join(d.home, "quotas")
+}
+
+func (d *Driver) quotasDirID(id string) string {
+ return path.Join(d.quotasDir(), id)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ return d.Create(id, parent, opts)
+}
+
+// Create the filesystem with given id.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+ quotas := path.Join(d.home, "quotas")
+ subvolumes := path.Join(d.home, "subvolumes")
+ rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+ if err != nil {
+ return err
+ }
+ if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil {
+ return err
+ }
+ if parent == "" {
+ if err := subvolCreate(subvolumes, id); err != nil {
+ return err
+ }
+ } else {
+ parentDir := d.subvolumesDirID(parent)
+ st, err := os.Stat(parentDir)
+ if err != nil {
+ return err
+ }
+ if !st.IsDir() {
+ return fmt.Errorf("%s: not a directory", parentDir)
+ }
+ if err := subvolSnapshot(parentDir, subvolumes, id); err != nil {
+ return err
+ }
+ }
+
+ var storageOpt map[string]string
+ if opts != nil {
+ storageOpt = opts.StorageOpt
+ }
+
+ if _, ok := storageOpt["size"]; ok {
+ driver := &Driver{}
+ if err := d.parseStorageOpt(storageOpt, driver); err != nil {
+ return err
+ }
+
+ if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
+ return err
+ }
+ if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
+ return err
+ }
+ }
+
+ // if we have a remapped root (user namespaces enabled), change the created snapshot
+ // dir ownership to match
+ if rootUID != 0 || rootGID != 0 {
+ if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil {
+ return err
+ }
+ }
+
+ mountLabel := ""
+ if opts != nil {
+ mountLabel = opts.MountLabel
+ }
+
+ return label.Relabel(path.Join(subvolumes, id), mountLabel, false)
+}
+
+// Parse btrfs storage options
+func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
+ // Read size to change the subvolume disk quota per container
+ for key, val := range storageOpt {
+ key := strings.ToLower(key)
+ switch key {
+ case "size":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return err
+ }
+ driver.options.size = uint64(size)
+ default:
+ return fmt.Errorf("Unknown option %s", key)
+ }
+ }
+
+ return nil
+}
+
+// Set btrfs storage size
+func (d *Driver) setStorageSize(dir string, driver *Driver) error {
+ if driver.options.size <= 0 {
+ return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size)))
+ }
+ if d.options.minSpace > 0 && driver.options.size < d.options.minSpace {
+ return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
+ }
+
+ if err := d.subvolEnableQuota(); err != nil {
+ return err
+ }
+
+ if err := subvolLimitQgroup(dir, driver.options.size); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Remove the filesystem with given id.
+func (d *Driver) Remove(id string) error {
+ dir := d.subvolumesDirID(id)
+ if _, err := os.Stat(dir); err != nil {
+ return err
+ }
+ quotasDir := d.quotasDirID(id)
+ if _, err := os.Stat(quotasDir); err == nil {
+ if err := os.Remove(quotasDir); err != nil {
+ return err
+ }
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+
+ // Call updateQuotaStatus() to invoke status update
+ d.updateQuotaStatus()
+
+ if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil {
+ return err
+ }
+ if err := system.EnsureRemoveAll(dir); err != nil {
+ return err
+ }
+ if err := d.subvolRescanQuota(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Get the requested filesystem id.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+ dir := d.subvolumesDirID(id)
+ st, err := os.Stat(dir)
+ if err != nil {
+ return "", err
+ }
+
+ if !st.IsDir() {
+ return "", fmt.Errorf("%s: not a directory", dir)
+ }
+
+ if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+ if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
+ if err := d.subvolEnableQuota(); err != nil {
+ return "", err
+ }
+ if err := subvolLimitQgroup(dir, size); err != nil {
+ return "", err
+ }
+ }
+ }
+
+ return dir, nil
+}
+
+// Put is not implemented for BTRFS as there is no cleanup required for the id.
+func (d *Driver) Put(id string) error {
+ // Get() creates no runtime resources (like e.g. mounts)
+ // so this doesn't need to do anything.
+ return nil
+}
+
+// Exists checks if the id exists in the filesystem.
+func (d *Driver) Exists(id string) bool {
+ dir := d.subvolumesDirID(id)
+ _, err := os.Stat(dir)
+ return err == nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ return nil
+}
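
A hedged sketch of wiring the options above together; the home path and
sizes are placeholders, and calling Init directly (rather than through the
graphdriver registry) is for illustration only:

	package main

	import (
		"log"

		"github.com/containers/storage/drivers"
		"github.com/containers/storage/drivers/btrfs"
	)

	func main() {
		// "btrfs.min_space" flows through parseOptions and enables quota.
		d, err := btrfs.Init("/var/lib/storage/btrfs",
			[]string{"btrfs.min_space=1G"}, nil, nil)
		if err != nil {
			log.Fatal(err)
		}
		// A per-layer "size" flows through parseStorageOpt at Create time.
		if err := d.CreateReadWrite("layer-id", "", &graphdriver.CreateOpts{
			StorageOpt: map[string]string{"size": "10G"},
		}); err != nil {
			log.Fatal(err)
		}
	}
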
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
new file mode 100644
index 000000000..f07088887
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
new file mode 100644
index 000000000..73d90cdd7
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -0,0 +1,26 @@
+// +build linux,!btrfs_noversion
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// around version 3.16, they did not define lib version yet
+#ifndef BTRFS_LIB_VERSION
+#define BTRFS_LIB_VERSION -1
+#endif
+
+// upstream had removed it, but now it will be coming back
+#ifndef BTRFS_BUILD_VERSION
+#define BTRFS_BUILD_VERSION "-"
+#endif
+*/
+import "C"
+
+func btrfsBuildVersion() string {
+ return string(C.BTRFS_BUILD_VERSION)
+}
+
+func btrfsLibVersion() int {
+ return int(C.BTRFS_LIB_VERSION)
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
new file mode 100644
index 000000000..f802fbc62
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -0,0 +1,14 @@
+// +build linux,btrfs_noversion
+
+package btrfs
+
+// TODO(vbatts) remove this work-around once supported linux distros are on
+// btrfs utilities of >= 3.16.1
+
+func btrfsBuildVersion() string {
+ return "-"
+}
+
+func btrfsLibVersion() int {
+ return -1
+}
diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go
new file mode 100644
index 000000000..72551a38d
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/counter.go
@@ -0,0 +1,59 @@
+package graphdriver
+
+import "sync"
+
+type minfo struct {
+ check bool
+ count int
+}
+
+// RefCounter is a generic counter for use by graphdriver Get/Put calls
+type RefCounter struct {
+ counts map[string]*minfo
+ mu sync.Mutex
+ checker Checker
+}
+
+// NewRefCounter returns a new RefCounter
+func NewRefCounter(c Checker) *RefCounter {
+ return &RefCounter{
+ checker: c,
+ counts: make(map[string]*minfo),
+ }
+}
+
+// Increment increases the ref count for the given id and returns the current count
+func (c *RefCounter) Increment(path string) int {
+ return c.incdec(path, func(minfo *minfo) {
+ minfo.count++
+ })
+}
+
+// Decrement decreases the ref count for the given id and returns the current count
+func (c *RefCounter) Decrement(path string) int {
+ return c.incdec(path, func(minfo *minfo) {
+ minfo.count--
+ })
+}
+
+func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
+ c.mu.Lock()
+ m := c.counts[path]
+ if m == nil {
+ m = &minfo{}
+ c.counts[path] = m
+ }
+	// If we are checking this path for the first time, check whether it is
+	// already mounted on the system; if it is, start the ref count at one,
+	// since the mount is in use.
+ if !m.check {
+ m.check = true
+ if c.checker.IsMounted(path) {
+ m.count++
+ }
+ }
+ infoOp(m)
+ count := m.count
+ c.mu.Unlock()
+ return count
+}
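+
+// Illustrative sketch (not part of the upstream file; the checker and mount
+// logic are hypothetical): a graphdriver typically brackets its Get/Put with
+// Increment/Decrement so the mount is created only on the first reference and
+// released only when the last reference goes away:
+//
+//	ctr := NewRefCounter(someChecker) // someChecker implements Checker
+//	// in Get():
+//	if count := ctr.Increment(mountPath); count > 1 {
+//		return mountPath, nil // already mounted; we only took a reference
+//	}
+//	// ...perform the actual mount...
+//	// in Put():
+//	if count := ctr.Decrement(mountPath); count > 0 {
+//		return nil // still in use by other callers
+//	}
+//	// ...perform the actual unmount...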
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
new file mode 100644
index 000000000..1430c8859
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -0,0 +1,236 @@
+package devmapper
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type directLVMConfig struct {
+ Device string
+ ThinpPercent uint64
+ ThinpMetaPercent uint64
+ AutoExtendPercent uint64
+ AutoExtendThreshold uint64
+}
+
+var (
+ errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
+ errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
+ errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm")
+)
+
+func validateLVMConfig(cfg directLVMConfig) error {
+ if reflect.DeepEqual(cfg, directLVMConfig{}) {
+ return nil
+ }
+ if cfg.Device == "" {
+ return errMissingSetupDevice
+ }
+	if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || (cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0) {
+ return errThinpPercentMissing
+ }
+
+ if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
+ return errThinpPercentTooBig
+ }
+ return nil
+}
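+
+// Illustrative examples (not part of the upstream file; the device path is
+// made up) of how the rules above combine:
+//
+//	validateLVMConfig(directLVMConfig{})                 // nil: empty config, direct-lvm not requested
+//	validateLVMConfig(directLVMConfig{ThinpPercent: 95}) // errMissingSetupDevice
+//	validateLVMConfig(directLVMConfig{
+//		Device: "/dev/xvdf", ThinpPercent: 95}) // errThinpPercentMissing
+//	validateLVMConfig(directLVMConfig{
+//		Device: "/dev/xvdf", ThinpPercent: 95, ThinpMetaPercent: 1}) // nil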
+
+func checkDevAvailable(dev string) error {
+ lvmScan, err := exec.LookPath("lvmdiskscan")
+ if err != nil {
+ logrus.Debug("could not find lvmdiskscan")
+ return nil
+ }
+
+ out, err := exec.Command(lvmScan).CombinedOutput()
+ if err != nil {
+ logrus.WithError(err).Error(string(out))
+ return nil
+ }
+
+ if !bytes.Contains(out, []byte(dev)) {
+ return errors.Errorf("%s is not available for use with devicemapper", dev)
+ }
+ return nil
+}
+
+func checkDevInVG(dev string) error {
+ pvDisplay, err := exec.LookPath("pvdisplay")
+ if err != nil {
+ logrus.Debug("could not find pvdisplay")
+ return nil
+ }
+
+ out, err := exec.Command(pvDisplay, dev).CombinedOutput()
+ if err != nil {
+ logrus.WithError(err).Error(string(out))
+ return nil
+ }
+
+ scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
+ for scanner.Scan() {
+ fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
+ if len(fields) > 1 {
+			// got a "VG Name" line
+ vg := strings.TrimSpace(fields[1])
+ if len(vg) > 0 {
+ return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
+ }
+ logrus.Error(fields)
+ break
+ }
+ }
+ return nil
+}
+
+func checkDevHasFS(dev string) error {
+ blkid, err := exec.LookPath("blkid")
+ if err != nil {
+ logrus.Debug("could not find blkid")
+ return nil
+ }
+
+ out, err := exec.Command(blkid, dev).CombinedOutput()
+ if err != nil {
+ logrus.WithError(err).Error(string(out))
+ return nil
+ }
+
+ fields := bytes.Fields(out)
+ for _, f := range fields {
+ kv := bytes.Split(f, []byte{'='})
+ if bytes.Equal(kv[0], []byte("TYPE")) {
+ v := bytes.Trim(kv[1], "\"")
+ if len(v) > 0 {
+ return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+func verifyBlockDevice(dev string, force bool) error {
+ if err := checkDevAvailable(dev); err != nil {
+ return err
+ }
+ if err := checkDevInVG(dev); err != nil {
+ return err
+ }
+
+ if force {
+ return nil
+ }
+
+ if err := checkDevHasFS(dev); err != nil {
+ return err
+ }
+ return nil
+}
+
+func readLVMConfig(root string) (directLVMConfig, error) {
+ var cfg directLVMConfig
+
+ p := filepath.Join(root, "setup-config.json")
+ b, err := ioutil.ReadFile(p)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return cfg, nil
+ }
+ return cfg, errors.Wrap(err, "error reading existing setup config")
+ }
+
+	// If this is just an empty file, return early; no need to produce a JSON error later.
+ if len(b) == 0 {
+ return cfg, nil
+ }
+
+ err = json.Unmarshal(b, &cfg)
+ return cfg, errors.Wrap(err, "error unmarshaling previous device setup config")
+}
+
+func writeLVMConfig(root string, cfg directLVMConfig) error {
+ p := filepath.Join(root, "setup-config.json")
+ b, err := json.Marshal(cfg)
+ if err != nil {
+ return errors.Wrap(err, "error marshalling direct lvm config")
+ }
+ err = ioutil.WriteFile(p, b, 0600)
+ return errors.Wrap(err, "error writing direct lvm config to file")
+}
+
+func setupDirectLVM(cfg directLVMConfig) error {
+ lvmProfileDir := "/etc/lvm/profile"
+ binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"}
+
+ for _, bin := range binaries {
+ if _, err := exec.LookPath(bin); err != nil {
+ return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm")
+ }
+ }
+
+ err := os.MkdirAll(lvmProfileDir, 0755)
+ if err != nil {
+ return errors.Wrap(err, "error creating lvm profile directory")
+ }
+
+ if cfg.AutoExtendPercent == 0 {
+ cfg.AutoExtendPercent = 20
+ }
+
+ if cfg.AutoExtendThreshold == 0 {
+ cfg.AutoExtendThreshold = 80
+ }
+
+ if cfg.ThinpPercent == 0 {
+ cfg.ThinpPercent = 95
+ }
+ if cfg.ThinpMetaPercent == 0 {
+ cfg.ThinpMetaPercent = 1
+ }
+
+ out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+ out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
+ err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
+ if err != nil {
+ return errors.Wrap(err, "error writing storage thinp autoextend profile")
+ }
+
+ out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
+ return errors.Wrap(err, string(out))
+}
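+
+// Rough command-line equivalent (illustrative, not part of the upstream file)
+// of the sequence above with an empty config for a made-up device /dev/xvdf,
+// after the defaults (thinp 95%, metadata 1%, autoextend threshold 80,
+// autoextend percent 20) are applied:
+//
+//	pvcreate -f /dev/xvdf
+//	vgcreate storage /dev/xvdf
+//	lvcreate --wipesignatures y -n thinpool storage --extents 95%VG
+//	lvcreate --wipesignatures y -n thinpoolmeta storage --extents 1%VG
+//	lvconvert -y --zero n -c 512K --thinpool storage/thinpool --poolmetadata storage/thinpoolmeta
+//	lvchange --metadataprofile storage-thinpool storage/thinpool
+//
+// plus writing the autoextend settings to
+// /etc/lvm/profile/storage-thinpool.profile.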
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
new file mode 100644
index 000000000..6db7b2b2c
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -0,0 +1,2825 @@
+// +build linux
+
+package devmapper
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/devicemapper"
+ "github.com/containers/storage/pkg/dmesg"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/loopback"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/parsers"
+ "github.com/containers/storage/pkg/parsers/kernel"
+ units "github.com/docker/go-units"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024
+ defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024
+ defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024
+ defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors
+ defaultUdevSyncOverride = false
+ maxDeviceID = 0xffffff // 24 bit, pool limit
+ deviceIDMapSz = (maxDeviceID + 1) / 8
+ driverDeferredRemovalSupport = false
+ enableDeferredRemoval = false
+ enableDeferredDeletion = false
+ userBaseSize = false
+ defaultMinFreeSpacePercent uint32 = 10
+ lvmSetupConfigForce bool
+)
+
+const deviceSetMetaFile string = "deviceset-metadata"
+const transactionMetaFile string = "transaction-metadata"
+
+type transaction struct {
+ OpenTransactionID uint64 `json:"open_transaction_id"`
+ DeviceIDHash string `json:"device_hash"`
+ DeviceID int `json:"device_id"`
+}
+
+type devInfo struct {
+ Hash string `json:"-"`
+ DeviceID int `json:"device_id"`
+ Size uint64 `json:"size"`
+ TransactionID uint64 `json:"transaction_id"`
+ Initialized bool `json:"initialized"`
+ Deleted bool `json:"deleted"`
+ devices *DeviceSet
+
+ // The global DeviceSet lock guarantees that we serialize all
+ // the calls to libdevmapper (which is not threadsafe), but we
+ // sometimes release that lock while sleeping. In that case
+ // this per-device lock is still held, protecting against
+ // other accesses to the device that we're doing the wait on.
+ //
+	// WARNING: In order to avoid AB-BA deadlocks when releasing
+	// the global lock while holding the per-device locks, all
+	// per-device locks must be acquired *before* the global lock,
+	// and multiple device locks should be acquired parent before child.
+ lock sync.Mutex
+}
+
+type metaData struct {
+ Devices map[string]*devInfo `json:"Devices"`
+}
+
+// DeviceSet holds information about a list of devices
+type DeviceSet struct {
+ metaData `json:"-"`
+ sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper
+ root string
+ devicePrefix string
+ TransactionID uint64 `json:"-"`
+ NextDeviceID int `json:"next_device_id"`
+ deviceIDMap []byte
+
+ // Options
+ dataLoopbackSize int64
+ metaDataLoopbackSize int64
+ baseFsSize uint64
+ filesystem string
+ mountOptions string
+ mkfsArgs []string
+ dataDevice string // block or loop dev
+ dataLoopFile string // loopback file, if used
+ metadataDevice string // block or loop dev
+ metadataLoopFile string // loopback file, if used
+ doBlkDiscard bool
+ thinpBlockSize uint32
+ thinPoolDevice string
+ transaction `json:"-"`
+ overrideUdevSyncCheck bool
+ deferredRemove bool // use deferred removal
+ deferredDelete bool // use deferred deletion
+ BaseDeviceUUID string // save UUID of base device
+ BaseDeviceFilesystem string // save filesystem of base device
+ nrDeletedDevices uint // number of deleted devices
+ deletionWorkerTicker *time.Ticker
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+	minFreeSpacePercent uint32 // minimum free space percentage in thin pool
+ xfsNospaceRetries string // max retries when xfs receives ENOSPC
+ lvmSetupConfig directLVMConfig
+}
+
+// DiskUsage contains information about disk usage and is used when reporting Status of a device.
+type DiskUsage struct {
+ // Used bytes on the disk.
+ Used uint64
+ // Total bytes on the disk.
+ Total uint64
+ // Available bytes on the disk.
+ Available uint64
+}
+
+// Status returns the information about the device.
+type Status struct {
+ // PoolName is the name of the data pool.
+ PoolName string
+ // DataFile is the actual block device for data.
+ DataFile string
+ // DataLoopback loopback file, if used.
+ DataLoopback string
+ // MetadataFile is the actual block device for metadata.
+ MetadataFile string
+ // MetadataLoopback is the loopback file, if used.
+ MetadataLoopback string
+ // Data is the disk used for data.
+ Data DiskUsage
+ // Metadata is the disk used for meta data.
+ Metadata DiskUsage
+	// BaseDeviceSize is the base size of containers and images.
+ BaseDeviceSize uint64
+ // BaseDeviceFS is backing filesystem.
+ BaseDeviceFS string
+	// SectorSize is the size of a sector.
+ SectorSize uint64
+ // UdevSyncSupported is true if sync is supported.
+ UdevSyncSupported bool
+	// DeferredRemoveEnabled is true if deferred removal is enabled.
+ DeferredRemoveEnabled bool
+	// True if deferred deletion is enabled. This is different from
+	// deferred removal. "Removal" means that the device-mapper device is
+	// deactivated; the thin device is still in the thin pool and can be
+	// activated again. "Deletion" means that the thin device will be
+	// deleted from the thin pool and can't be activated again.
+ DeferredDeleteEnabled bool
+ DeferredDeletedDeviceCount uint
+ MinFreeSpace uint64
+}
+
+// Structure used to export image/container metadata in inspect.
+type deviceMetadata struct {
+ deviceID int
+ deviceSize uint64 // size in bytes
+ deviceName string // Device name as used during activation
+}
+
+// DevStatus holds information about a mounted device: its ID, size, and sector usage.
+type DevStatus struct {
+ // DeviceID is the id of the device.
+ DeviceID int
+ // Size is the size of the filesystem.
+ Size uint64
+	// TransactionID is a unique, monotonically increasing integer per device set, used to identify an operation on the file system.
+ TransactionID uint64
+ // SizeInSectors indicates the size of the sectors allocated.
+ SizeInSectors uint64
+ // MappedSectors indicates number of mapped sectors.
+ MappedSectors uint64
+	// HighestMappedSector is the highest mapped sector.
+ HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+ return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+ hash := info.Hash
+ if hash == "" {
+ hash = "base"
+ }
+ return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+ return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+ return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+ return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+ file := info.Hash
+ if file == "" {
+ file = "base"
+ }
+ return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+ return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+ return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+ return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+ if devices.thinPoolDevice == "" {
+ return devices.devicePrefix + "-pool"
+ }
+ return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+ return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+ dirname := devices.loopbackDir()
+ filename := path.Join(dirname, name)
+
+ _, err := os.Stat(filename)
+ return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and the new size is larger than its current size, it grows to the new size.
+// Either way it returns the full path.
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
+ dirname := devices.loopbackDir()
+ filename := path.Join(dirname, name)
+
+ uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
+ if err != nil {
+ return "", err
+ }
+ if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) {
+ return "", err
+ }
+
+ if fi, err := os.Stat(filename); err != nil {
+ if !os.IsNotExist(err) {
+ return "", err
+ }
+		logrus.Debugf("devmapper: Creating loopback file %s for device-mapper use", filename)
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ if err := file.Truncate(size); err != nil {
+ return "", err
+ }
+ } else {
+ if fi.Size() < size {
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+ if err := file.Truncate(size); err != nil {
+ return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err)
+ }
+ } else if fi.Size() > size {
+ logrus.Warnf("devmapper: Can't shrink loopback file %s", filename)
+ }
+ }
+ return filename, nil
+}
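+
+// Note (illustrative): Truncate only sets the file's logical length, so the
+// loopback images created here are sparse. A freshly created 100 GB "data"
+// file therefore allocates almost no disk blocks, which is visible as the
+// difference between apparent size (ls -lh) and allocated blocks (du -sh).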
+
+func (devices *DeviceSet) allocateTransactionID() uint64 {
+ devices.OpenTransactionID = devices.TransactionID + 1
+ return devices.OpenTransactionID
+}
+
+func (devices *DeviceSet) updatePoolTransactionID() error {
+ if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil {
+ return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err)
+ }
+ devices.TransactionID = devices.OpenTransactionID
+ return nil
+}
+
+func (devices *DeviceSet) removeMetadata(info *devInfo) error {
+ if err := os.RemoveAll(devices.metadataFile(info)); err != nil {
+ return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err)
+ }
+ return nil
+}
+
+// Given JSON data and a file path, write it to disk atomically.
+func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {
+ tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
+ if err != nil {
+ return fmt.Errorf("devmapper: Error creating metadata file: %s", err)
+ }
+
+ n, err := tmpFile.Write(jsonData)
+ if err != nil {
+ return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err)
+ }
+ if n < len(jsonData) {
+ return io.ErrShortWrite
+ }
+ if err := tmpFile.Sync(); err != nil {
+ return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err)
+ }
+ if err := tmpFile.Close(); err != nil {
+ return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err)
+ }
+ if err := os.Rename(tmpFile.Name(), filePath); err != nil {
+ return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err)
+ }
+
+ return nil
+}
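+
+// Note (illustrative): this is the classic write-to-temp, fsync, close, then
+// rename pattern. Because rename(2) is atomic within a filesystem, readers of
+// the metadata file see either the old contents or the complete new contents,
+// never a torn write, even if the process crashes mid-update.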
+
+func (devices *DeviceSet) saveMetadata(info *devInfo) error {
+ jsonData, err := json.Marshal(info)
+ if err != nil {
+ return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
+ }
+ if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (devices *DeviceSet) markDeviceIDUsed(deviceID int) {
+ var mask byte
+ i := deviceID % 8
+ mask = 1 << uint(i)
+ devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask
+}
+
+func (devices *DeviceSet) markDeviceIDFree(deviceID int) {
+ var mask byte
+ i := deviceID % 8
+ mask = ^(1 << uint(i))
+ devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask
+}
+
+func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool {
+ var mask byte
+ i := deviceID % 8
+ mask = (1 << uint(i))
+ return (devices.deviceIDMap[deviceID/8] & mask) == 0
+}
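+
+// Worked example (illustrative): the three functions above treat deviceIDMap
+// as a bitset with one bit per 24-bit device ID. For deviceID = 10:
+//
+//	byte index = 10 / 8 = 1
+//	bit mask   = 1 << (10 % 8) = 1 << 2 = 0b00000100
+//
+// markDeviceIDUsed ORs the mask into deviceIDMap[1], markDeviceIDFree ANDs in
+// its complement, and isDeviceIDFree reports whether the bit is still zero.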
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) {
+ info := devices.Devices[hash]
+ if info == nil {
+ info = devices.loadMetadata(hash)
+ if info == nil {
+ return nil, fmt.Errorf("devmapper: Unknown device %s", hash)
+ }
+
+ devices.Devices[hash] = info
+ }
+ return info, nil
+}
+
+func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) {
+ devices.Lock()
+ defer devices.Unlock()
+ info, err := devices.lookupDevice(hash)
+ return info, err
+}
+
+// This function relies on the device hash map having been loaded in advance.
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) constructDeviceIDMap() {
+ logrus.Debug("devmapper: constructDeviceIDMap()")
+ defer logrus.Debug("devmapper: constructDeviceIDMap() END")
+
+ for _, info := range devices.Devices {
+ devices.markDeviceIDUsed(info.DeviceID)
+ logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID)
+ }
+}
+
+func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error {
+
+ // Skip some of the meta files which are not device files.
+ if strings.HasSuffix(finfo.Name(), ".migrated") {
+ logrus.Debugf("devmapper: Skipping file %s", path)
+ return nil
+ }
+
+ if strings.HasPrefix(finfo.Name(), ".") {
+ logrus.Debugf("devmapper: Skipping file %s", path)
+ return nil
+ }
+
+ if finfo.Name() == deviceSetMetaFile {
+ logrus.Debugf("devmapper: Skipping file %s", path)
+ return nil
+ }
+
+ if finfo.Name() == transactionMetaFile {
+ logrus.Debugf("devmapper: Skipping file %s", path)
+ return nil
+ }
+
+ logrus.Debugf("devmapper: Loading data for file %s", path)
+
+ hash := finfo.Name()
+ if hash == "base" {
+ hash = ""
+ }
+
+	// Include deleted devices as well, since the cleanup logic for deleted
+	// devices will go through them to see if any remain.
+ if _, err := devices.lookupDevice(hash); err != nil {
+ return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) loadDeviceFilesOnStart() error {
+ logrus.Debug("devmapper: loadDeviceFilesOnStart()")
+ defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END")
+
+ var scan = func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Debugf("devmapper: Can't walk the file %s", path)
+ return nil
+ }
+
+ // Skip any directories
+ if info.IsDir() {
+ return nil
+ }
+
+ return devices.deviceFileWalkFunction(path, info)
+ }
+
+ return filepath.Walk(devices.metadataDir(), scan)
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) unregisterDevice(hash string) error {
+ logrus.Debugf("devmapper: unregisterDevice(%v)", hash)
+ info := &devInfo{
+ Hash: hash,
+ }
+
+ delete(devices.Devices, hash)
+
+ if err := devices.removeMetadata(info); err != nil {
+ logrus.Debugf("devmapper: Error removing metadata: %s", err)
+ return err
+ }
+
+ return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) {
+ logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash)
+ info := &devInfo{
+ Hash: hash,
+ DeviceID: id,
+ Size: size,
+ TransactionID: transactionID,
+ Initialized: false,
+ devices: devices,
+ }
+
+ devices.Devices[hash] = info
+
+ if err := devices.saveMetadata(info); err != nil {
+ // Try to remove unused device
+ delete(devices.Devices, hash)
+ return nil, err
+ }
+
+ return info, nil
+}
+
+func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error {
+ logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash)
+
+ if info.Deleted && !ignoreDeleted {
+ return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash)
+ }
+
+ // Make sure deferred removal on device is canceled, if one was
+ // scheduled.
+ if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil {
+ return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err)
+ }
+
+ if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
+ return nil
+ }
+
+ return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
+}
+
+// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error
+func xfsSupported() error {
+ // Make sure mkfs.xfs is available
+ if _, err := exec.LookPath("mkfs.xfs"); err != nil {
+ return err // error text is descriptive enough
+ }
+
+ // Check if kernel supports xfs filesystem or not.
+ exec.Command("modprobe", "xfs").Run()
+
+ f, err := os.Open("/proc/filesystems")
+ if err != nil {
+ return errors.Wrapf(err, "error checking for xfs support")
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if strings.HasSuffix(s.Text(), "\txfs") {
+ return nil
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return errors.Wrapf(err, "error checking for xfs support")
+ }
+
+ return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
+}
+
+func determineDefaultFS() string {
+ err := xfsSupported()
+ if err == nil {
+ return "xfs"
+ }
+
+ logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err)
+ return "ext4"
+}
+
+// mkfsOptions tries to figure out whether some additional mkfs options are required
+func mkfsOptions(fs string) []string {
+ if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) {
+ // For kernels earlier than 3.16 (and newer xfsutils),
+ // some xfs features need to be explicitly disabled.
+ return []string{"-m", "crc=0,finobt=0"}
+ }
+
+ return []string{}
+}
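+
+// Usage note (illustrative): on a pre-3.16 kernel, mkfsOptions("xfs") returns
+// []string{"-m", "crc=0,finobt=0"} so that a newer mkfs.xfs does not enable
+// on-disk features the running kernel cannot mount; on kernels >= 3.16, or
+// for any other filesystem, it returns no extra options.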
+
+func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
+ devname := info.DevName()
+
+ if devices.filesystem == "" {
+ devices.filesystem = determineDefaultFS()
+ }
+ if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil {
+ return err
+ }
+
+ args := mkfsOptions(devices.filesystem)
+ args = append(args, devices.mkfsArgs...)
+ args = append(args, devname)
+
+ logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args)
+ defer func() {
+ if err != nil {
+ logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err)
+ } else {
+ logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name())
+ }
+ }()
+
+ switch devices.filesystem {
+ case "xfs":
+ err = exec.Command("mkfs.xfs", args...).Run()
+ case "ext4":
+ err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run()
+ if err != nil {
+ err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run()
+ }
+ if err != nil {
+ return err
+ }
+ err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run()
+ default:
+ err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem)
+ }
+ return
+}
+
+func (devices *DeviceSet) migrateOldMetaData() error {
+ // Migrate old metadata file
+ jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ if jsonData != nil {
+ m := metaData{Devices: make(map[string]*devInfo)}
+
+ if err := json.Unmarshal(jsonData, &m); err != nil {
+ return err
+ }
+
+ for hash, info := range m.Devices {
+ info.Hash = hash
+ devices.saveMetadata(info)
+ }
+ if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+// Clean up deleted devices. It assumes that all the devices have been
+// loaded in the hash table.
+func (devices *DeviceSet) cleanupDeletedDevices() error {
+ devices.Lock()
+
+ // If there are no deleted devices, there is nothing to do.
+ if devices.nrDeletedDevices == 0 {
+ devices.Unlock()
+ return nil
+ }
+
+ var deletedDevices []*devInfo
+
+ for _, info := range devices.Devices {
+ if !info.Deleted {
+ continue
+ }
+ logrus.Debugf("devmapper: Found deleted device %s.", info.Hash)
+ deletedDevices = append(deletedDevices, info)
+ }
+
+ // Delete the deleted devices. DeleteDevice() first takes the info lock
+ // and then devices.Lock(). So drop it to avoid deadlock.
+ devices.Unlock()
+
+ for _, info := range deletedDevices {
+ // This will again try deferred deletion.
+ if err := devices.DeleteDevice(info.Hash, false); err != nil {
+ logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err)
+ }
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) countDeletedDevices() {
+ for _, info := range devices.Devices {
+ if !info.Deleted {
+ continue
+ }
+ devices.nrDeletedDevices++
+ }
+}
+
+func (devices *DeviceSet) startDeviceDeletionWorker() {
+ // Deferred deletion is not enabled. Don't do anything.
+ if !devices.deferredDelete {
+ return
+ }
+
+ logrus.Debug("devmapper: Worker to cleanup deleted devices started")
+ for range devices.deletionWorkerTicker.C {
+ devices.cleanupDeletedDevices()
+ }
+}
+
+func (devices *DeviceSet) initMetaData() error {
+ devices.Lock()
+ defer devices.Unlock()
+
+ if err := devices.migrateOldMetaData(); err != nil {
+ return err
+ }
+
+ _, transactionID, _, _, _, _, err := devices.poolStatus()
+ if err != nil {
+ return err
+ }
+
+ devices.TransactionID = transactionID
+
+ if err := devices.loadDeviceFilesOnStart(); err != nil {
+ return fmt.Errorf("devmapper: Failed to load device files:%v", err)
+ }
+
+ devices.constructDeviceIDMap()
+ devices.countDeletedDevices()
+
+ if err := devices.processPendingTransaction(); err != nil {
+ return err
+ }
+
+	// Start a goroutine to clean up deleted devices
+ go devices.startDeviceDeletionWorker()
+ return nil
+}
+
+func (devices *DeviceSet) incNextDeviceID() {
+	// IDs are 24-bit, so wrap around
+ devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID
+}
+
+func (devices *DeviceSet) getNextFreeDeviceID() (int, error) {
+ devices.incNextDeviceID()
+ for i := 0; i <= maxDeviceID; i++ {
+ if devices.isDeviceIDFree(devices.NextDeviceID) {
+ devices.markDeviceIDUsed(devices.NextDeviceID)
+ return devices.NextDeviceID, nil
+ }
+ devices.incNextDeviceID()
+ }
+
+ return 0, fmt.Errorf("devmapper: Unable to find a free device ID")
+}
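+
+// Worked example (illustrative): the scan above is circular over the 24-bit
+// ID space. If NextDeviceID is 0xffffff, incNextDeviceID wraps it to 0, since
+// (0xffffff + 1) & maxDeviceID == 0, and the loop tries at most
+// maxDeviceID + 1 candidates before failing, so allocation terminates even
+// when the pool is completely full.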
+
+func (devices *DeviceSet) poolHasFreeSpace() error {
+ if devices.minFreeSpacePercent == 0 {
+ return nil
+ }
+
+ _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+ if err != nil {
+ return err
+ }
+
+ minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+ if minFreeData < 1 {
+ minFreeData = 1
+ }
+ dataFree := dataTotal - dataUsed
+ if dataFree < minFreeData {
+ return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData)
+ }
+
+ minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100
+ if minFreeMetadata < 1 {
+ minFreeMetadata = 1
+ }
+
+ metadataFree := metadataTotal - metadataUsed
+ if metadataFree < minFreeMetadata {
+ return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata)
+ }
+
+ return nil
+}
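+
+// Worked example (illustrative): with the default minFreeSpacePercent of 10
+// and dataTotal = 1000 blocks, minFreeData = 1000 * 10 / 100 = 100, so the
+// check fails once fewer than 100 data blocks remain free. The same
+// calculation is applied independently to the metadata blocks.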
+
+func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
+ devices.Lock()
+ defer devices.Unlock()
+
+ deviceID, err := devices.getNextFreeDeviceID()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := devices.openTransaction(hash, deviceID); err != nil {
+ logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID)
+ devices.markDeviceIDFree(deviceID)
+ return nil, err
+ }
+
+ for {
+ if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil {
+ if devicemapper.DeviceIDExists(err) {
+			// Device ID already exists. This should not
+			// happen, since we now have a mechanism to find
+			// a free device ID, so something is not right.
+			// Give a warning and continue.
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID)
+ deviceID, err = devices.getNextFreeDeviceID()
+ if err != nil {
+ return nil, err
+ }
+ // Save new device id into transaction
+ devices.refreshTransaction(deviceID)
+ continue
+ }
+ logrus.Debugf("devmapper: Error creating device: %s", err)
+ devices.markDeviceIDFree(deviceID)
+ return nil, err
+ }
+ break
+ }
+
+ logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize)
+ info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID)
+ if err != nil {
+ _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
+ devices.markDeviceIDFree(deviceID)
+ return nil, err
+ }
+
+ if err := devices.closeTransaction(); err != nil {
+ devices.unregisterDevice(hash)
+ devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
+ devices.markDeviceIDFree(deviceID)
+ return nil, err
+ }
+ return info, nil
+}
+
+func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error {
+ var (
+ devinfo *devicemapper.Info
+ err error
+ )
+
+ if err = devices.poolHasFreeSpace(); err != nil {
+ return err
+ }
+
+ if devices.deferredRemove {
+ devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name())
+ if err != nil {
+ return err
+ }
+ if devinfo != nil && devinfo.DeferredRemove != 0 {
+ err = devices.cancelDeferredRemoval(baseInfo)
+ if err != nil {
+				// If the error is ErrEnxio, the device is probably already gone. Continue.
+ if errors.Cause(err) != devicemapper.ErrEnxio {
+ return err
+ }
+ devinfo = nil
+ } else {
+ defer devices.deactivateDevice(baseInfo)
+ }
+ }
+ } else {
+ devinfo, err = devicemapper.GetInfo(baseInfo.Name())
+ if err != nil {
+ return err
+ }
+ }
+
+ doSuspend := devinfo != nil && devinfo.Exists != 0
+
+ if doSuspend {
+ if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil {
+ return err
+ }
+ defer devicemapper.ResumeDevice(baseInfo.Name())
+ }
+
+ if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error {
+ deviceID, err := devices.getNextFreeDeviceID()
+ if err != nil {
+ return err
+ }
+
+ if err := devices.openTransaction(hash, deviceID); err != nil {
+ logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID)
+ devices.markDeviceIDFree(deviceID)
+ return err
+ }
+
+ for {
+ if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil {
+ if devicemapper.DeviceIDExists(err) {
+			// Device ID already exists. This should not
+			// happen, since we now have a mechanism to find
+			// a free device ID, so something is not right.
+			// Give a warning and continue.
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID)
+ deviceID, err = devices.getNextFreeDeviceID()
+ if err != nil {
+ return err
+ }
+ // Save new device id into transaction
+ devices.refreshTransaction(deviceID)
+ continue
+ }
+ logrus.Debugf("devmapper: Error creating snap device: %s", err)
+ devices.markDeviceIDFree(deviceID)
+ return err
+ }
+ break
+ }
+
+ if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil {
+ devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
+ devices.markDeviceIDFree(deviceID)
+ logrus.Debugf("devmapper: Error registering device: %s", err)
+ return err
+ }
+
+ if err := devices.closeTransaction(); err != nil {
+ devices.unregisterDevice(hash)
+ devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
+ devices.markDeviceIDFree(deviceID)
+ return err
+ }
+ return nil
+}
+
+func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
+ info := &devInfo{Hash: hash, devices: devices}
+
+ jsonData, err := ioutil.ReadFile(devices.metadataFile(info))
+ if err != nil {
+ logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err)
+ return nil
+ }
+
+ if err := json.Unmarshal(jsonData, &info); err != nil {
+ logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err)
+ return nil
+ }
+
+ if info.DeviceID > maxDeviceID {
+ logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID)
+ return nil
+ }
+
+ return info
+}
+
+func getDeviceUUID(device string) (string, error) {
+ out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output()
+ if err != nil {
+ return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err)
+ }
+
+ uuid := strings.TrimSuffix(string(out), "\n")
+ uuid = strings.TrimSpace(uuid)
+ logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid)
+ return uuid, nil
+}
+
+func (devices *DeviceSet) getBaseDeviceSize() uint64 {
+ info, _ := devices.lookupDevice("")
+ if info == nil {
+ return 0
+ }
+ return info.Size
+}
+
+func (devices *DeviceSet) getBaseDeviceFS() string {
+ return devices.BaseDeviceFilesystem
+}
+
+func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error {
+ devices.Lock()
+ defer devices.Unlock()
+
+ if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
+ return err
+ }
+ defer devices.deactivateDevice(baseInfo)
+
+ uuid, err := getDeviceUUID(baseInfo.DevName())
+ if err != nil {
+ return err
+ }
+
+ if devices.BaseDeviceUUID != uuid {
+ return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID)
+ }
+
+ if devices.BaseDeviceFilesystem == "" {
+ fsType, err := ProbeFsType(baseInfo.DevName())
+ if err != nil {
+ return err
+ }
+ if err := devices.saveBaseDeviceFilesystem(fsType); err != nil {
+ return err
+ }
+ }
+
+	// If the user specified a filesystem using the dm.fs option and the
+	// current filesystem of the base image is not the same, warn the user
+	// that dm.fs will be ignored.
+ if devices.BaseDeviceFilesystem != devices.filesystem {
+ logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem)
+ devices.filesystem = devices.BaseDeviceFilesystem
+ }
+ return nil
+}
+
+func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error {
+ devices.BaseDeviceFilesystem = fs
+ return devices.saveDeviceSetMetaData()
+}
+
+func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
+ devices.Lock()
+ defer devices.Unlock()
+
+ if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
+ return err
+ }
+ defer devices.deactivateDevice(baseInfo)
+
+ uuid, err := getDeviceUUID(baseInfo.DevName())
+ if err != nil {
+ return err
+ }
+
+ devices.BaseDeviceUUID = uuid
+ return devices.saveDeviceSetMetaData()
+}
+
+func (devices *DeviceSet) createBaseImage() error {
+ logrus.Debug("devmapper: Initializing base device-mapper thin volume")
+
+ // Create initial device
+ info, err := devices.createRegisterDevice("")
+ if err != nil {
+ return err
+ }
+
+ logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
+
+ if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+ return err
+ }
+
+ if err := devices.createFilesystem(info); err != nil {
+ return err
+ }
+
+ info.Initialized = true
+ if err := devices.saveMetadata(info); err != nil {
+ info.Initialized = false
+ return err
+ }
+
+ if err := devices.saveBaseDeviceUUID(info); err != nil {
+ return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+ }
+
+ return nil
+}
+
+// Returns whether the thin pool device exists. If the device exists, also makes
+// sure it is a thin pool device and not some other type of device.
+func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
+ logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice)
+
+ info, err := devicemapper.GetInfo(thinPoolDevice)
+ if err != nil {
+ return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
+ }
+
+ // Device does not exist.
+ if info.Exists == 0 {
+ return false, nil
+ }
+
+ _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
+ if err != nil {
+ return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
+ }
+
+ if deviceType != "thin-pool" {
+ return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice)
+ }
+
+ return true, nil
+}
+
+func (devices *DeviceSet) checkThinPool() error {
+ _, transactionID, dataUsed, _, _, _, err := devices.poolStatus()
+ if err != nil {
+ return err
+ }
+ if dataUsed != 0 {
+ return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks",
+ devices.thinPoolDevice)
+ }
+ if transactionID != 0 {
+ return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID",
+ devices.thinPoolDevice)
+ }
+ return nil
+}
+
+// The base image is initialized properly. Either save the UUID for the first
+// time (the upgrade case) or verify the UUID.
+func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
+	// If BaseDeviceUUID is empty (upgrade case), save it and return success.
+ if devices.BaseDeviceUUID == "" {
+ if err := devices.saveBaseDeviceUUID(baseInfo); err != nil {
+ return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+ }
+ return nil
+ }
+
+ if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil {
+ return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {
+
+ if !userBaseSize {
+ return nil
+ }
+
+ if devices.baseFsSize < devices.getBaseDeviceSize() {
+ return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize())))
+ }
+
+ if devices.baseFsSize == devices.getBaseDeviceSize() {
+ return nil
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ info.Size = devices.baseFsSize
+
+ if err := devices.saveMetadata(info); err != nil {
+ // Try to remove unused device
+ delete(devices.Devices, info.Hash)
+ return err
+ }
+
+ return devices.growFS(info)
+}
+
+func (devices *DeviceSet) growFS(info *devInfo) error {
+ if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+ return fmt.Errorf("Error activating devmapper device: %s", err)
+ }
+
+ defer devices.deactivateDevice(info)
+
+ fsMountPoint := "/run/containers/storage/mnt"
+ if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
+ if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
+ return err
+ }
+ defer os.RemoveAll(fsMountPoint)
+ }
+
+ options := ""
+ if devices.BaseDeviceFilesystem == "xfs" {
+		// XFS needs nouuid or it can't mount filesystems with the same UUID
+ options = joinMountOptions(options, "nouuid")
+ }
+ options = joinMountOptions(options, devices.mountOptions)
+
+ if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
+ return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256)))
+ }
+
+ defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
+
+ switch devices.BaseDeviceFilesystem {
+ case "ext4":
+ if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
+ return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+ }
+ case "xfs":
+ if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
+ return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+ }
+ default:
+ return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem)
+ }
+ return nil
+}
+
+func (devices *DeviceSet) setupBaseImage() error {
+ oldInfo, _ := devices.lookupDeviceWithLock("")
+
+	// The base image already exists. If it is initialized properly, do UUID
+	// verification and return. Otherwise remove the image and set it up
+	// fresh.
+
+ if oldInfo != nil {
+ if oldInfo.Initialized && !oldInfo.Deleted {
+ if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil {
+ return err
+ }
+
+ if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ logrus.Debug("devmapper: Removing uninitialized base image")
+ // If previous base device is in deferred delete state,
+ // that needs to be cleaned up first. So don't try
+ // deferred deletion.
+ if err := devices.DeleteDevice("", true); err != nil {
+ return err
+ }
+ }
+
+ // If we are setting up base image for the first time, make sure
+ // thin pool is empty.
+ if devices.thinPoolDevice != "" && oldInfo == nil {
+ if err := devices.checkThinPool(); err != nil {
+ return err
+ }
+ }
+
+ // Create new base image device
+ if err := devices.createBaseImage(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func setCloseOnExec(name string) {
+ fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
+ for _, i := range fileInfos {
+ link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
+ if link == name {
+ fd, err := strconv.Atoi(i.Name())
+ if err == nil {
+ unix.CloseOnExec(fd)
+ }
+ }
+ }
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
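+
+// Worked example (illustrative): major and minor above decode the Linux dev_t
+// layout, in which the minor number is split around the 12-bit major field.
+// For a device with major 8 and minor 1, dev = 0x801:
+//
+//	major = (0x801 >> 8) & 0xfff = 8
+//	minor = (0x801 & 0xff) | ((0x801 >> 12) & 0xfff00) = 1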
+
+// ResizePool increases the size of the pool.
+func (devices *DeviceSet) ResizePool(size int64) error {
+ dirname := devices.loopbackDir()
+ datafilename := path.Join(dirname, "data")
+ if len(devices.dataDevice) > 0 {
+ datafilename = devices.dataDevice
+ }
+ metadatafilename := path.Join(dirname, "metadata")
+ if len(devices.metadataDevice) > 0 {
+ metadatafilename = devices.metadataDevice
+ }
+
+ datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0)
+ if datafile == nil {
+ return err
+ }
+ defer datafile.Close()
+
+ fi, err := datafile.Stat()
+ if fi == nil {
+ return err
+ }
+
+ if fi.Size() > size {
+ return fmt.Errorf("devmapper: Can't shrink file")
+ }
+
+ dataloopback := loopback.FindLoopDeviceFor(datafile)
+ if dataloopback == nil {
+ return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename)
+ }
+ defer dataloopback.Close()
+
+ metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0)
+ if metadatafile == nil {
+ return err
+ }
+ defer metadatafile.Close()
+
+ metadataloopback := loopback.FindLoopDeviceFor(metadatafile)
+ if metadataloopback == nil {
+ return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename)
+ }
+ defer metadataloopback.Close()
+
+ // Grow loopback file
+ if err := datafile.Truncate(size); err != nil {
+ return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err)
+ }
+
+ // Reload size for loopback device
+ if err := loopback.SetCapacity(dataloopback); err != nil {
+ return fmt.Errorf("Unable to update loopback capacity: %s", err)
+ }
+
+ // Suspend the pool
+ if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil {
+ return fmt.Errorf("devmapper: Unable to suspend pool: %s", err)
+ }
+
+ // Reload with the new block sizes
+ if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil {
+ return fmt.Errorf("devmapper: Unable to reload pool: %s", err)
+ }
+
+ // Resume the pool
+ if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil {
+ return fmt.Errorf("devmapper: Unable to resume pool: %s", err)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) loadTransactionMetaData() error {
+ jsonData, err := ioutil.ReadFile(devices.transactionMetaFile())
+ if err != nil {
+ // There is no active transaction. This will be the case
+ // during upgrade.
+ if os.IsNotExist(err) {
+ devices.OpenTransactionID = devices.TransactionID
+ return nil
+ }
+ return err
+ }
+
+	return json.Unmarshal(jsonData, &devices.transaction)
+}
+
+func (devices *DeviceSet) saveTransactionMetaData() error {
+ jsonData, err := json.Marshal(&devices.transaction)
+ if err != nil {
+ return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
+ }
+
+ return devices.writeMetaFile(jsonData, devices.transactionMetaFile())
+}
+
+func (devices *DeviceSet) removeTransactionMetaData() error {
+ return os.RemoveAll(devices.transactionMetaFile())
+}
+
+func (devices *DeviceSet) rollbackTransaction() error {
+ logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID)
+
+ // A device id might have already been deleted before transaction
+ // closed. In that case this call will fail. Just leave a message
+ // in case of failure.
+ if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil {
+ logrus.Errorf("devmapper: Unable to delete device: %s", err)
+ }
+
+ dinfo := &devInfo{Hash: devices.DeviceIDHash}
+ if err := devices.removeMetadata(dinfo); err != nil {
+ logrus.Errorf("devmapper: Unable to remove metadata: %s", err)
+ } else {
+ devices.markDeviceIDFree(devices.DeviceID)
+ }
+
+ if err := devices.removeTransactionMetaData(); err != nil {
+ logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) processPendingTransaction() error {
+ if err := devices.loadTransactionMetaData(); err != nil {
+ return err
+ }
+
+	// If there was an open transaction but the pool transaction ID is the
+	// same as the open transaction ID, there is nothing to roll back.
+ if devices.TransactionID == devices.OpenTransactionID {
+ return nil
+ }
+
+ // If open transaction ID is less than pool transaction ID, something
+ // is wrong. Bail out.
+ if devices.OpenTransactionID < devices.TransactionID {
+ logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID)
+ return nil
+ }
+
+	// The pool transaction ID is not the same as the open transaction ID.
+	// There is a transaction which was not completed.
+ if err := devices.rollbackTransaction(); err != nil {
+ return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err)
+ }
+
+ devices.OpenTransactionID = devices.TransactionID
+ return nil
+}
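+
+// Illustrative scenario: suppose a crash happened after openTransaction()
+// recorded OpenTransactionID = 6 in the transaction file but before
+// closeTransaction() pushed it to the pool, which still reports
+// TransactionID = 5. On the next start, processPendingTransaction sees
+// 5 != 6, rolls back the half-created device recorded in the transaction
+// file, and resets OpenTransactionID back to 5.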
+
+func (devices *DeviceSet) loadDeviceSetMetaData() error {
+ jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
+ if err != nil {
+ // For backward compatibility return success if file does
+ // not exist.
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ return json.Unmarshal(jsonData, devices)
+}
+
+func (devices *DeviceSet) saveDeviceSetMetaData() error {
+ jsonData, err := json.Marshal(devices)
+ if err != nil {
+ return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
+ }
+
+ return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile())
+}
+
+func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error {
+ devices.allocateTransactionID()
+ devices.DeviceIDHash = hash
+ devices.DeviceID = DeviceID
+ if err := devices.saveTransactionMetaData(); err != nil {
+ return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err)
+ }
+ return nil
+}
+
+func (devices *DeviceSet) refreshTransaction(DeviceID int) error {
+ devices.DeviceID = DeviceID
+ if err := devices.saveTransactionMetaData(); err != nil {
+ return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err)
+ }
+ return nil
+}
+
+func (devices *DeviceSet) closeTransaction() error {
+ if err := devices.updatePoolTransactionID(); err != nil {
+ logrus.Debug("devmapper: Failed to close Transaction")
+ return err
+ }
+ return nil
+}
+
+func determineDriverCapabilities(version string) error {
+	// Kernel driver versions >= 4.27.0 support deferred removal
+
+ logrus.Debugf("devicemapper: kernel dm driver version is %s", version)
+
+ versionSplit := strings.Split(version, ".")
+ major, err := strconv.Atoi(versionSplit[0])
+ if err != nil {
+ return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0])
+ }
+
+ if major > 4 {
+ driverDeferredRemovalSupport = true
+ return nil
+ }
+
+ if major < 4 {
+ return nil
+ }
+
+ minor, err := strconv.Atoi(versionSplit[1])
+ if err != nil {
+ return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1])
+ }
+
+	/*
+	 * If major is 4 and minor is 27, then there is no need to
+	 * check the patch level, as it cannot be less than 0.
+	 */
+ if minor >= 27 {
+ driverDeferredRemovalSupport = true
+ return nil
+ }
+
+ return nil
+}
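+
+// Usage note (illustrative): determineDriverCapabilities("4.27.0") and
+// determineDriverCapabilities("5.1.0") both set
+// driverDeferredRemovalSupport = true, while "4.26.0" and "3.14.0" leave it
+// false without returning an error; only an unparsable version string is
+// reported as graphdriver.ErrNotSupported.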
+
+// Determine the major and minor numbers of a loopback device
+func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
+ var stat unix.Stat_t
+ err := unix.Stat(file.Name(), &stat)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ dev := stat.Rdev
+ majorNum := major(dev)
+ minorNum := minor(dev)
+
+ logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum)
+ return majorNum, minorNum, nil
+}
+
+// Given a file which is the backing file of a loopback device, find the
+// loopback device name and its major/minor number.
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ logrus.Debugf("devmapper: Failed to open file %s", filename)
+ return "", 0, 0, err
+ }
+
+ defer file.Close()
+ loopbackDevice := loopback.FindLoopDeviceFor(file)
+ if loopbackDevice == nil {
+ return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename)
+ }
+ defer loopbackDevice.Close()
+
+ Major, Minor, err := getDeviceMajorMinor(loopbackDevice)
+ if err != nil {
+ return "", 0, 0, err
+ }
+ return loopbackDevice.Name(), Major, Minor, nil
+}
+
+// Get the major/minor numbers of thin pool data and metadata devices
+func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) {
+ var params, poolDataMajMin, poolMetadataMajMin string
+
+ _, _, _, params, err := devicemapper.GetTable(devices.getPoolName())
+ if err != nil {
+ return 0, 0, 0, 0, err
+ }
+
+ if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil {
+ return 0, 0, 0, 0, err
+ }
+
+ logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin)
+
+ poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":")
+ poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32)
+ if err != nil {
+ return 0, 0, 0, 0, err
+ }
+
+ poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32)
+ if err != nil {
+ return 0, 0, 0, 0, err
+ }
+
+ poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":")
+ poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32)
+ if err != nil {
+ return 0, 0, 0, 0, err
+ }
+
+ poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32)
+ if err != nil {
+ return 0, 0, 0, 0, err
+ }
+
+ return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil
+}
+
+func (devices *DeviceSet) loadThinPoolLoopBackInfo() error {
+ poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin()
+ if err != nil {
+ return err
+ }
+
+ dirname := devices.loopbackDir()
+
+	// The data device has not been passed in, so there should be a data
+	// file which is being mounted as a loop device.
+ if devices.dataDevice == "" {
+ datafilename := path.Join(dirname, "data")
+ dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename)
+ if err != nil {
+ return err
+ }
+
+ // Compare the two
+ if poolDataMajor == dataMajor && poolDataMinor == dataMinor {
+ devices.dataDevice = dataLoopDevice
+ devices.dataLoopFile = datafilename
+ }
+
+ }
+
+	// The metadata device has not been passed in, so there should be a
+	// metadata file which is being mounted as a loop device.
+ if devices.metadataDevice == "" {
+ metadatafilename := path.Join(dirname, "metadata")
+ metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename)
+ if err != nil {
+ return err
+ }
+ if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor {
+ devices.metadataDevice = metadataLoopDevice
+ devices.metadataLoopFile = metadatafilename
+ }
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) enableDeferredRemovalDeletion() error {
+
+	// If the user asked for deferred removal, check that both the libdm
+	// library and the kernel driver support it; otherwise error out.
+ if enableDeferredRemoval {
+ if !driverDeferredRemovalSupport {
+ return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
+ }
+ if !devicemapper.LibraryDeferredRemovalSupport {
+ return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
+ }
+ logrus.Debug("devmapper: Deferred removal support enabled.")
+ devices.deferredRemove = true
+ }
+
+ if enableDeferredDeletion {
+ if !devices.deferredRemove {
+			return fmt.Errorf("devmapper: Deferred deletion cannot be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
+ }
+ logrus.Debug("devmapper: Deferred deletion support enabled.")
+ devices.deferredDelete = true
+ }
+ return nil
+}
+
+func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
+ if err := devices.enableDeferredRemovalDeletion(); err != nil {
+ return err
+ }
+
+ // https://github.com/docker/docker/issues/4036
+ if supported := devicemapper.UdevSetSyncSupport(true); !supported {
+ logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options")
+
+ if !devices.overrideUdevSyncCheck {
+ return graphdriver.ErrNotSupported
+ }
+ }
+
+	// Create the root dir of the devmapper driver with ownership matching
+	// this daemon's remapped root uid/gid so containers can start properly.
+ uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
+ if err != nil {
+ return err
+ }
+ if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
+ return err
+ }
+ if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ prevSetupConfig, err := readLVMConfig(devices.root)
+ if err != nil {
+ return err
+ }
+
+ if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) {
+ if devices.thinPoolDevice != "" {
+ return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified")
+ }
+
+ if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) {
+ if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) {
+ return errors.New("changing direct-lvm config is not supported")
+ }
+ logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode")
+ if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil {
+ return err
+ }
+ if err := setupDirectLVM(devices.lvmSetupConfig); err != nil {
+ return err
+ }
+ if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil {
+ return err
+ }
+ }
+ devices.thinPoolDevice = "storage-thinpool"
+ logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice)
+ }
+
+ // Set the device prefix from the device id and inode of the storage root dir
+ var st unix.Stat_t
+ if err := unix.Stat(devices.root, &st); err != nil {
+ return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
+ }
+	// "reg-" stands for "regular file".
+	// In the future we might use "dev-" for "device file", etc.
+	// container-<maj>:<min>-<inode> stands for:
+	//	- Managed by container storage
+	//	- The target of this device is at major <maj> and minor <min>
+	//	- If <inode> is defined, use that file inside the device as a
+	//	  loopback image. Otherwise use the device itself.
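+	// For example, a storage root on device 253:0 with inode 1050933 would
+	// yield the prefix "container-253:0-1050933" (illustrative values).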
+ devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino)
+ logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
+
+ // Check for the existence of the thin-pool device
+ poolExists, err := devices.thinPoolExists(devices.getPoolName())
+ if err != nil {
+ return err
+ }
+
+	// It seems libdevmapper opens this without O_CLOEXEC, and Go's exec
+	// will not close files that are not close-on-exec, so we add this bad
+	// hack to make sure it gets closed on exec.
+ setCloseOnExec("/dev/mapper/control")
+
+ // Make sure the sparse images exist in <root>/devicemapper/data and
+ // <root>/devicemapper/metadata
+
+ createdLoopback := false
+
+ // If the pool doesn't exist, create it
+ if !poolExists && devices.thinPoolDevice == "" {
+ logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
+
+ var (
+ dataFile *os.File
+ metadataFile *os.File
+ )
+
+ fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir())
+ if err != nil {
+ return err
+ }
+ switch fsMagic {
+ case graphdriver.FsMagicAufs:
+		return errors.Errorf("devmapper: Loopback devices cannot be created on AUFS filesystems")
+ }
+
+ if devices.dataDevice == "" {
+ // Make sure the sparse images exist in <root>/devicemapper/data
+
+ hasData := devices.hasImage("data")
+
+ if !doInit && !hasData {
+ return errors.New("loopback data file not found")
+ }
+
+ if !hasData {
+ createdLoopback = true
+ }
+
+ data, err := devices.ensureImage("data", devices.dataLoopbackSize)
+ if err != nil {
+ logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
+ return err
+ }
+
+ dataFile, err = loopback.AttachLoopDevice(data)
+ if err != nil {
+ return err
+ }
+ devices.dataLoopFile = data
+ devices.dataDevice = dataFile.Name()
+ } else {
+ dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ defer dataFile.Close()
+
+ if devices.metadataDevice == "" {
+ // Make sure the sparse images exist in <root>/devicemapper/metadata
+
+ hasMetadata := devices.hasImage("metadata")
+
+ if !doInit && !hasMetadata {
+ return errors.New("loopback metadata file not found")
+ }
+
+ if !hasMetadata {
+ createdLoopback = true
+ }
+
+ metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
+ if err != nil {
+ logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
+ return err
+ }
+
+ metadataFile, err = loopback.AttachLoopDevice(metadata)
+ if err != nil {
+ return err
+ }
+ devices.metadataLoopFile = metadata
+ devices.metadataDevice = metadataFile.Name()
+ } else {
+ metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ defer metadataFile.Close()
+
+ if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
+ return err
+ }
+ defer func() {
+ if retErr != nil {
+ err = devices.deactivatePool()
+ if err != nil {
+ logrus.Warnf("devmapper: Failed to deactivatePool: %v", err)
+ }
+ }
+ }()
+ }
+
+	// The pool already exists and the caller did not pass us a pool. That
+	// means we probably created the pool earlier and could not remove it
+	// because some containers were still using it. Detect some properties
+	// of the pool, such as whether it is using loop devices.
+ if poolExists && devices.thinPoolDevice == "" {
+ if err := devices.loadThinPoolLoopBackInfo(); err != nil {
+		logrus.Debugf("devmapper: Failed to load thin pool loopback device information: %v", err)
+ return err
+ }
+ }
+
+ // If we didn't just create the data or metadata image, we need to
+ // load the transaction id and migrate old metadata
+ if !createdLoopback {
+ if err := devices.initMetaData(); err != nil {
+ return err
+ }
+ }
+
+ if devices.thinPoolDevice == "" {
+ if devices.metadataLoopFile != "" || devices.dataLoopFile != "" {
+ logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.")
+ }
+ }
+
+ // Right now this loads only NextDeviceID. If there is more metadata
+ // down the line, we might have to move it earlier.
+ if err := devices.loadDeviceSetMetaData(); err != nil {
+ return err
+ }
+
+ // Setup the base image
+ if doInit {
+ if err := devices.setupBaseImage(); err != nil {
+ logrus.Debugf("devmapper: Error device setupBaseImage: %s", err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// AddDevice adds a device and registers it in the hash.
+func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error {
+ logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash)
+ defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash)
+
+ // If a deleted device exists, return error.
+ baseInfo, err := devices.lookupDeviceWithLock(baseHash)
+ if err != nil {
+ return err
+ }
+
+ if baseInfo.Deleted {
+ return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash)
+ }
+
+ baseInfo.lock.Lock()
+ defer baseInfo.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+	// Also include deleted devices in case the hash of the new device is
+	// the same as that of one of the deleted devices.
+ if info, _ := devices.lookupDevice(hash); info != nil {
+ return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted)
+ }
+
+ size, err := devices.parseStorageOpt(storageOpt)
+ if err != nil {
+ return err
+ }
+
+ if size == 0 {
+ size = baseInfo.Size
+ }
+
+ if size < baseInfo.Size {
+ return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size)))
+ }
+
+ if err := devices.takeSnapshot(hash, baseInfo, size); err != nil {
+ return err
+ }
+
+ // Grow the container rootfs.
+ if size > baseInfo.Size {
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return err
+ }
+
+ if err := devices.growFS(info); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) {
+
+ // Read size to change the block device size per container.
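+	// For example, a storage option of size=20G is parsed by
+	// units.RAMInBytes into 20*1024*1024*1024 bytes (the option spelling
+	// is illustrative; only the "size" key matters here).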
+ for key, val := range storageOpt {
+ key := strings.ToLower(key)
+ switch key {
+ case "size":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(size), nil
+ default:
+			return 0, fmt.Errorf("devmapper: Unknown option %s", key)
+ }
+ }
+
+ return 0, nil
+}
+
+func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
+ // If device is already in deleted state, there is nothing to be done.
+ if info.Deleted {
+ return nil
+ }
+
+ logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash)
+
+ info.Deleted = true
+
+ // save device metadata to reflect deleted state.
+ if err := devices.saveMetadata(info); err != nil {
+ info.Deleted = false
+ return err
+ }
+
+ devices.nrDeletedDevices++
+ return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
+ if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
+		logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID)
+ return err
+ }
+
+ defer devices.closeTransaction()
+
+ err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
+ if err != nil {
+		// If syncDelete is true, we want to return the error. If deferred
+		// deletion is not enabled, we return the error. If the error is
+		// something other than EBUSY, return the error.
+ if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy {
+ logrus.Debugf("devmapper: Error deleting device: %s", err)
+ return err
+ }
+ }
+
+ if err == nil {
+ if err := devices.unregisterDevice(info.Hash); err != nil {
+ return err
+ }
+ // If device was already in deferred delete state that means
+ // deletion was being tried again later. Reduce the deleted
+ // device count.
+ if info.Deleted {
+ devices.nrDeletedDevices--
+ }
+ devices.markDeviceIDFree(info.DeviceID)
+ } else {
+ if err := devices.markForDeferredDeletion(info); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+ logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
+ defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
+	// This is a workaround for the kernel not discarding blocks on
+	// the thin pool when we remove a thin device, so we do it
+	// manually.
+ // Even if device is deferred deleted, activate it and issue
+ // discards.
+ if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+ return err
+ }
+
+ devinfo, err := devicemapper.GetInfo(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo.OpenCount != 0 {
+ logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+ return nil
+ }
+
+ if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+ logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+ }
+ return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+ if devices.doBlkDiscard {
+ devices.issueDiscard(info)
+ }
+
+ // Try to deactivate device in case it is active.
+ // If deferred removal is enabled and deferred deletion is disabled
+	// then make sure the device is removed synchronously. There have been
+	// some cases of the device being busy for a short duration, and we
+	// would rather busy-wait for device removal to take care of these cases.
+ deferredRemove := devices.deferredRemove
+ if !devices.deferredDelete {
+ deferredRemove = false
+ }
+
+ if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
+ logrus.Debugf("devmapper: Error deactivating device: %s", err)
+ return err
+ }
+
+ if err := devices.deleteTransaction(info, syncDelete); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteDevice will return success if the device has been marked for deferred
+// removal. If one wants to override that and wants DeleteDevice() to fail if
+// the device was busy and could not be deleted, set syncDelete=true.
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
+ logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
+ defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ return devices.deleteDevice(info, syncDelete)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+ logrus.Debug("devmapper: deactivatePool() START")
+ defer logrus.Debug("devmapper: deactivatePool() END")
+ devname := devices.getPoolDevName()
+
+ devinfo, err := devicemapper.GetInfo(devname)
+ if err != nil {
+ return err
+ }
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+ if err := devicemapper.RemoveDevice(devname); err != nil {
+ return err
+ }
+
+ if d, err := devicemapper.GetDeps(devname); err == nil {
+ logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
+ return devices.deactivateDeviceMode(info, devices.deferredRemove)
+}
+
+func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
+ var err error
+ logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
+ defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
+
+ devinfo, err := devicemapper.GetInfo(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+
+ if deferredRemove {
+ err = devicemapper.RemoveDeviceDeferred(info.Name())
+ } else {
+ err = devices.removeDevice(info.Name())
+ }
+
+	// This function's semantics are such that it does not return an
+	// error if the device does not exist. So if the device went away by
+	// the time we actually tried to remove it, do not return an error.
+ if errors.Cause(err) != devicemapper.ErrEnxio {
+ return err
+ }
+ return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+ var err error
+
+ logrus.Debugf("devmapper: removeDevice START(%s)", devname)
+ defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
+
+ for i := 0; i < 200; i++ {
+ err = devicemapper.RemoveDevice(devname)
+ if err == nil {
+ break
+ }
+ if errors.Cause(err) != devicemapper.ErrBusy {
+ return err
+ }
+
+		// If we see EBUSY it may be a transient error,
+		// sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ }
+
+ return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
+ if !devices.deferredRemove {
+ return nil
+ }
+
+ logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
+ defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
+
+ devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo != nil && devinfo.DeferredRemove == 0 {
+ return nil
+ }
+
+ // Cancel deferred remove
+ if err := devices.cancelDeferredRemoval(info); err != nil {
+		// If the error is ErrEnxio, the device is probably already gone. Continue.
+		if errors.Cause(err) != devicemapper.ErrEnxio {
+ return err
+ }
+ }
+ return nil
+}
+
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
+ logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+ defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
+
+ var err error
+
+ // Cancel deferred remove
+ for i := 0; i < 100; i++ {
+ err = devicemapper.CancelDeferredRemove(info.Name())
+ if err != nil {
+			if errors.Cause(err) == devicemapper.ErrBusy {
+				// If we see EBUSY it may be a transient error,
+				// sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ continue
+ }
+ }
+ break
+ }
+ return err
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+ logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+ logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+ defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+	// Stop the deletion worker. This should stop delivering new events to
+	// the ticker channel. That means no new instance of cleanupDeletedDevice()
+	// will run after this call. If one instance is already running at
+	// the time of the call, it must be holding devices.Lock() and
+	// we will block on this lock until the cleanup function exits.
+ devices.deletionWorkerTicker.Stop()
+
+ devices.Lock()
+	// Save the DeviceSet metadata first. Docker kills all threads if they
+	// don't finish within a certain time. It is possible that Shutdown()
+	// does not finish in time as we loop trying to deactivate devices
+	// while they are busy. In that case the Shutdown() routine will be
+	// killed and we will not get a chance to save the deviceset metadata.
+	// Hence save it early, before trying to deactivate devices.
+ devices.saveDeviceSetMetaData()
+
+	// Ignore the error since it's just a best effort to avoid trying to
+	// unmount something that isn't mounted.
+ mounts, _ := mount.GetMounts()
+ mounted := make(map[string]bool, len(mounts))
+ for _, mnt := range mounts {
+ mounted[mnt.Mountpoint] = true
+ }
+
+ if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() {
+ return nil
+ }
+
+ if mounted[p] {
+ // We use MNT_DETACH here in case it is still busy in some running
+ // container. This means it'll go away from the global scope directly,
+ // and the device will be released when that container dies.
+ if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
+ logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
+ }
+ }
+
+ if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+ logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+ } else {
+ if err := devices.deactivateDevice(devInfo); err != nil {
+				logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err)
+ }
+ }
+
+ return nil
+ }); err != nil && !os.IsNotExist(err) {
+ devices.Unlock()
+ return err
+ }
+
+ devices.Unlock()
+
+ info, _ := devices.lookupDeviceWithLock("")
+ if info != nil {
+ info.lock.Lock()
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err)
+ }
+ devices.Unlock()
+ info.lock.Unlock()
+ }
+
+ devices.Lock()
+ if devices.thinPoolDevice == "" {
+ if err := devices.deactivatePool(); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err)
+ }
+ }
+ devices.Unlock()
+
+ return nil
+}
+
+// Recent XFS changes allow changing the behavior of the filesystem on errors.
+// When the thin pool gets full and XFS gets an ENOSPC error, it currently
+// retries the IO infinitely, which can sometimes block the container process
+// so that it can't be killed. With a value of 0, XFS will not retry upon
+// error and instead will shut down the filesystem.
+
+func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
+ dmDevicePath, err := os.Readlink(info.DevName())
+ if err != nil {
+ return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
+ }
+
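+	// /dev/mapper/<name> is typically a symlink to ../dm-<N>; the resolved
+	// base name ("dm-<N>") is the directory name used under /sys/fs/xfs/.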
+ dmDeviceName := path.Base(dmDevicePath)
+ filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
+ maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
+ if err != nil {
+		return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system: %v", err)
+ }
+ defer maxRetriesFile.Close()
+
+	// Write the configured retry count (0 disables retries and shuts down
+	// the filesystem on ENOSPC).
+ _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
+ if err != nil {
+ return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
+ }
+ return nil
+}
+
+// MountDevice mounts the device if not already mounted.
+func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return err
+ }
+
+ if info.Deleted {
+ return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash)
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+ return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
+ }
+
+ fstype, err := ProbeFsType(info.DevName())
+ if err != nil {
+ return err
+ }
+
+ options := ""
+
+ if fstype == "xfs" {
+		// XFS needs nouuid or it can't mount filesystems with the same UUID
+ options = joinMountOptions(options, "nouuid")
+ }
+
+ options = joinMountOptions(options, devices.mountOptions)
+ options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
+
+ if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
+ return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
+ }
+
+ if fstype == "xfs" && devices.xfsNospaceRetries != "" {
+ if err := devices.xfsSetNospaceRetries(info); err != nil {
+ unix.Unmount(path, unix.MNT_DETACH)
+ devices.deactivateDevice(info)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// UnmountDevice unmounts the device and removes it from hash.
+func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
+ logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash)
+ defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash)
+
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ logrus.Debugf("devmapper: Unmount(%s)", mountPath)
+ if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil {
+ return err
+ }
+ logrus.Debug("devmapper: Unmount done")
+
+ return devices.deactivateDevice(info)
+}
+
+// HasDevice returns true if the device metadata exists.
+func (devices *DeviceSet) HasDevice(hash string) bool {
+ info, _ := devices.lookupDeviceWithLock(hash)
+ return info != nil
+}
+
+// List returns a list of device ids.
+func (devices *DeviceSet) List() []string {
+ devices.Lock()
+ defer devices.Unlock()
+
+ ids := make([]string, len(devices.Devices))
+ i := 0
+ for k := range devices.Devices {
+ ids[i] = k
+ i++
+ }
+ return ids
+}
+
+func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) {
+ var params string
+ _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName)
+ if err != nil {
+ return
+ }
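+	// The thin target's status parameters are "<nr mapped sectors>
+	// <highest mapped sector>" (an assumption based on the device-mapper
+	// thin target status format).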
+	_, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector)
+	return
+}
+
+// GetDeviceStatus provides size, mapped sectors, and the highest mapped
+// sector for the device identified by hash.
+func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) {
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return nil, err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ status := &DevStatus{
+ DeviceID: info.DeviceID,
+ Size: info.Size,
+ TransactionID: info.TransactionID,
+ }
+
+ if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+ return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
+ }
+
+ sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName())
+
+ if err != nil {
+ return nil, err
+ }
+
+ status.SizeInSectors = sizeInSectors
+ status.MappedSectors = mappedSectors
+ status.HighestMappedSector = highestMappedSector
+
+ return status, nil
+}
+
+func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) {
+ var params string
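+	// The thin-pool status parameters are "<transaction id>
+	// <used metadata blocks>/<total metadata blocks>
+	// <used data blocks>/<total data blocks>" (an assumption based on the
+	// device-mapper thin-pool status format).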
+ if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil {
+ _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal)
+ }
+ return
+}
+
+// DataDevicePath returns the path to the data storage for this deviceset,
+// whether it is backed by a loopback file or a block device.
+func (devices *DeviceSet) DataDevicePath() string {
+ return devices.dataDevice
+}
+
+// MetadataDevicePath returns the path to the metadata storage for this
+// deviceset, whether it is backed by a loopback file or a block device.
+func (devices *DeviceSet) MetadataDevicePath() string {
+ return devices.metadataDevice
+}
+
+func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
+ buf := new(unix.Statfs_t)
+ if err := unix.Statfs(loopFile, buf); err != nil {
+ logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err)
+ return 0, err
+ }
+ return buf.Bfree * uint64(buf.Bsize), nil
+}
+
+func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) {
+ if loopFile != "" {
+ fi, err := os.Stat(loopFile)
+ if err != nil {
+ logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err)
+ return false, err
+ }
+ return fi.Mode().IsRegular(), nil
+ }
+ return false, nil
+}
+
+// Status returns the current status of this deviceset
+func (devices *DeviceSet) Status() *Status {
+ devices.Lock()
+ defer devices.Unlock()
+
+ status := &Status{}
+
+ status.PoolName = devices.getPoolName()
+ status.DataFile = devices.DataDevicePath()
+ status.DataLoopback = devices.dataLoopFile
+ status.MetadataFile = devices.MetadataDevicePath()
+ status.MetadataLoopback = devices.metadataLoopFile
+ status.UdevSyncSupported = devicemapper.UdevSyncSupported()
+ status.DeferredRemoveEnabled = devices.deferredRemove
+ status.DeferredDeleteEnabled = devices.deferredDelete
+ status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
+ status.BaseDeviceSize = devices.getBaseDeviceSize()
+ status.BaseDeviceFS = devices.getBaseDeviceFS()
+
+ totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+ if err == nil {
+ // Convert from blocks to bytes
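+		// totalSizeInSectors is the pool size in 512-byte sectors, while
+		// dataTotal counts pool blocks, so the quotient below is the pool
+		// block size in sectors.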
+ blockSizeInSectors := totalSizeInSectors / dataTotal
+
+ status.Data.Used = dataUsed * blockSizeInSectors * 512
+ status.Data.Total = dataTotal * blockSizeInSectors * 512
+ status.Data.Available = status.Data.Total - status.Data.Used
+
+ // metadata blocks are always 4k
+ status.Metadata.Used = metadataUsed * 4096
+ status.Metadata.Total = metadataTotal * 4096
+ status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
+
+ status.SectorSize = blockSizeInSectors * 512
+
+ if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+ if err == nil && actualSpace < status.Data.Available {
+ status.Data.Available = actualSpace
+ }
+ }
+
+ if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+ if err == nil && actualSpace < status.Metadata.Available {
+ status.Metadata.Available = actualSpace
+ }
+ }
+
+ minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+ status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
+ }
+
+ return status
+}
+
+// exportDeviceMetadata returns the device metadata (device id, size, and
+// name) for the device identified by hash.
+func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return nil, err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
+ return metadata, nil
+}
+
+// NewDeviceSet creates the device set based on the options provided.
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) {
+ devicemapper.SetDevDir("/dev")
+
+ devices := &DeviceSet{
+ root: root,
+ metaData: metaData{Devices: make(map[string]*devInfo)},
+ dataLoopbackSize: defaultDataLoopbackSize,
+ metaDataLoopbackSize: defaultMetaDataLoopbackSize,
+ baseFsSize: defaultBaseFsSize,
+ overrideUdevSyncCheck: defaultUdevSyncOverride,
+ doBlkDiscard: true,
+ thinpBlockSize: defaultThinpBlockSize,
+ deviceIDMap: make([]byte, deviceIDMapSz),
+ deletionWorkerTicker: time.NewTicker(time.Second * 30),
+ uidMaps: uidMaps,
+ gidMaps: gidMaps,
+ minFreeSpacePercent: defaultMinFreeSpacePercent,
+ }
+
+ version, err := devicemapper.GetDriverVersion()
+ if err != nil {
+ // Can't even get driver version, assume not supported
+ return nil, graphdriver.ErrNotSupported
+ }
+
+ if err := determineDriverCapabilities(version); err != nil {
+ return nil, graphdriver.ErrNotSupported
+ }
+
+ if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport {
+		// Enable deferred removal and deletion by default when supported.
+ enableDeferredDeletion = true
+ enableDeferredRemoval = true
+ }
+
+ foundBlkDiscard := false
+ var lvmSetupConfig directLVMConfig
+ for _, option := range options {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "dm.basesize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ userBaseSize = true
+ devices.baseFsSize = uint64(size)
+ case "dm.loopdatasize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ devices.dataLoopbackSize = size
+ case "dm.loopmetadatasize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ devices.metaDataLoopbackSize = size
+ case "dm.fs":
+ if val != "ext4" && val != "xfs" {
+ return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val)
+ }
+ devices.filesystem = val
+ case "dm.mkfsarg":
+ devices.mkfsArgs = append(devices.mkfsArgs, val)
+ case "dm.mountopt":
+ devices.mountOptions = joinMountOptions(devices.mountOptions, val)
+ case "dm.metadatadev":
+ devices.metadataDevice = val
+ case "dm.datadev":
+ devices.dataDevice = val
+ case "dm.thinpooldev":
+ devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/")
+ case "dm.blkdiscard":
+ foundBlkDiscard = true
+ devices.doBlkDiscard, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+ case "dm.blocksize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ // convert to 512b sectors
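+			// e.g. dm.blocksize=64k is 65536 bytes, which becomes
+			// 65536>>9 = 128 sectors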
+ devices.thinpBlockSize = uint32(size) >> 9
+ case "dm.override_udev_sync_check":
+ devices.overrideUdevSyncCheck, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+
+ case "dm.use_deferred_removal":
+ enableDeferredRemoval, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+
+ case "dm.use_deferred_deletion":
+ enableDeferredDeletion, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+
+ case "dm.min_free_space":
+ if !strings.HasSuffix(val, "%") {
+ return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix")
+ }
+
+ valstring := strings.TrimSuffix(val, "%")
+ minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ if minFreeSpacePercent >= 100 {
+ return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val)
+ }
+
+ devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
+ case "dm.xfs_nospace_max_retries":
+ _, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ devices.xfsNospaceRetries = val
+ case "dm.directlvm_device":
+ lvmSetupConfig.Device = val
+ case "dm.directlvm_device_force":
+ lvmSetupConfigForce, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+ case "dm.thinp_percent":
+ per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val)
+ }
+ if per >= 100 {
+ return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100")
+ }
+ lvmSetupConfig.ThinpPercent = per
+ case "dm.thinp_metapercent":
+ per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val)
+ }
+ if per >= 100 {
+ return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100")
+ }
+ lvmSetupConfig.ThinpMetaPercent = per
+ case "dm.thinp_autoextend_percent":
+ per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val)
+ }
+ if per > 100 {
+				return nil, errors.New("dm.thinp_autoextend_percent cannot be greater than 100")
+ }
+ lvmSetupConfig.AutoExtendPercent = per
+ case "dm.thinp_autoextend_threshold":
+ per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val)
+ }
+ if per > 100 {
+				return nil, errors.New("dm.thinp_autoextend_threshold cannot be greater than 100")
+ }
+ lvmSetupConfig.AutoExtendThreshold = per
+ case "dm.libdm_log_level":
+ level, err := strconv.ParseInt(val, 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val)
+ }
+ if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
+ return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
+ }
+ // Register a new logging callback with the specified level.
+ devicemapper.LogInit(devicemapper.DefaultLogger{
+ Level: int(level),
+ })
+ default:
+ return nil, fmt.Errorf("devmapper: Unknown option %s", key)
+ }
+ }
+
+ if err := validateLVMConfig(lvmSetupConfig); err != nil {
+ return nil, err
+ }
+
+ devices.lvmSetupConfig = lvmSetupConfig
+
+	// By default, don't do the blk discard hack on raw devices; it's rarely useful and is expensive.
+ if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
+ devices.doBlkDiscard = false
+ }
+
+ if err := devices.initDevmapper(doInit); err != nil {
+ return nil, err
+ }
+
+ return devices, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
new file mode 100644
index 000000000..9ab3e4f86
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
@@ -0,0 +1,106 @@
+package devmapper
+
+// Definition of struct dm_task and sub structures (from lvm2)
+//
+// struct dm_ioctl {
+// /*
+// * The version number is made up of three parts:
+// * major - no backward or forward compatibility,
+// * minor - only backwards compatible,
+// * patch - both backwards and forwards compatible.
+// *
+// * All clients of the ioctl interface should fill in the
+// * version number of the interface that they were
+// * compiled with.
+// *
+// * All recognized ioctl commands (ie. those that don't
+// * return -ENOTTY) fill out this field, even if the
+// * command failed.
+// */
+// uint32_t version[3]; /* in/out */
+// uint32_t data_size; /* total size of data passed in
+// * including this struct */
+
+// uint32_t data_start; /* offset to start of data
+// * relative to start of this struct */
+
+// uint32_t target_count; /* in/out */
+// int32_t open_count; /* out */
+// uint32_t flags; /* in/out */
+
+// /*
+// * event_nr holds either the event number (input and output) or the
+// * udev cookie value (input only).
+// * The DM_DEV_WAIT ioctl takes an event number as input.
+// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
+// * use the field as a cookie to return in the DM_COOKIE
+// * variable with the uevents they issue.
+// * For output, the ioctls return the event number, not the cookie.
+// */
+// uint32_t event_nr; /* in/out */
+// uint32_t padding;
+
+// uint64_t dev; /* in/out */
+
+// char name[DM_NAME_LEN]; /* device name */
+// char uuid[DM_UUID_LEN]; /* unique identifier for
+// * the block device */
+// char data[7]; /* padding or data */
+// };
+
+// struct target {
+// uint64_t start;
+// uint64_t length;
+// char *type;
+// char *params;
+
+// struct target *next;
+// };
+
+// typedef enum {
+// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
+// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
+// } dm_add_node_t;
+
+// struct dm_task {
+// int type;
+// char *dev_name;
+// char *mangled_dev_name;
+
+// struct target *head, *tail;
+
+// int read_only;
+// uint32_t event_nr;
+// int major;
+// int minor;
+// int allow_default_major_fallback;
+// uid_t uid;
+// gid_t gid;
+// mode_t mode;
+// uint32_t read_ahead;
+// uint32_t read_ahead_flags;
+// union {
+// struct dm_ioctl *v4;
+// } dmi;
+// char *newname;
+// char *message;
+// char *geometry;
+// uint64_t sector;
+// int no_flush;
+// int no_open_count;
+// int skip_lockfs;
+// int query_inactive_table;
+// int suppress_identical_reload;
+// dm_add_node_t add_node;
+// uint64_t existing_table_size;
+// int cookie_set;
+// int new_uuid;
+// int secure_data;
+// int retry_remove;
+// int enable_checks;
+// int expected_errno;
+
+// char *uuid;
+// char *mangled_uuid;
+// };
+//
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
new file mode 100644
index 000000000..d68fb66cc
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -0,0 +1,240 @@
+// +build linux
+
+package devmapper
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/devicemapper"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/locker"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/system"
+ units "github.com/docker/go-units"
+)
+
+func init() {
+ graphdriver.Register("devicemapper", Init)
+}
+
+// Driver contains the device set mounted and the home directory
+type Driver struct {
+ *DeviceSet
+ home string
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+ locker *locker.Locker
+}
+
+// Init creates a driver with the given home and the set of options.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+ deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+
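+	// Mark home as a private mount so mount events do not propagate
+	// between it and the rest of the system (an assumption: the default
+	// propagation may be shared).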
+ if err := mount.MakePrivate(home); err != nil {
+ return nil, err
+ }
+
+ d := &Driver{
+ DeviceSet: deviceSet,
+ home: home,
+ uidMaps: uidMaps,
+ gidMaps: gidMaps,
+ ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
+ locker: locker.New(),
+ }
+
+ return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+}
+
+func (d *Driver) String() string {
+ return "devicemapper"
+}
+
+// Status returns the status of the driver in a printable format.
+// The information returned contains the pool name, data file, metadata file,
+// disk usage by data and metadata, etc.
+func (d *Driver) Status() [][2]string {
+ s := d.DeviceSet.Status()
+
+ status := [][2]string{
+ {"Pool Name", s.PoolName},
+ {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
+ {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
+ {"Backing Filesystem", s.BaseDeviceFS},
+ {"Data file", s.DataFile},
+ {"Metadata file", s.MetadataFile},
+ {"Data Space Used", units.HumanSize(float64(s.Data.Used))},
+ {"Data Space Total", units.HumanSize(float64(s.Data.Total))},
+ {"Data Space Available", units.HumanSize(float64(s.Data.Available))},
+ {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
+ {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
+ {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
+ {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
+ {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
+ {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
+ {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
+ {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
+ }
+ if len(s.DataLoopback) > 0 {
+ status = append(status, [2]string{"Data loop file", s.DataLoopback})
+ }
+ if len(s.MetadataLoopback) > 0 {
+ status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
+ }
+ if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
+ status = append(status, [2]string{"Library Version", vStr})
+ }
+ return status
+}
+
+// Metadata returns a map of information about the device.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+ m, err := d.DeviceSet.exportDeviceMetadata(id)
+
+ if err != nil {
+ return nil, err
+ }
+
+ metadata := make(map[string]string)
+ metadata["DeviceId"] = strconv.Itoa(m.deviceID)
+ metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
+ metadata["DeviceName"] = m.deviceName
+ return metadata, nil
+}
+
+// Cleanup shuts down the device set and unmounts the driver's home directory.
+func (d *Driver) Cleanup() error {
+ err := d.DeviceSet.Shutdown(d.home)
+
+ if err2 := mount.Unmount(d.home); err == nil {
+ err = err2
+ }
+
+ return err
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ return d.Create(id, parent, opts)
+}
+
+// Create adds a device with a given id and the parent.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+ var storageOpt map[string]string
+ if opts != nil {
+ storageOpt = opts.StorageOpt
+ }
+
+ if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Remove removes a device with a given id; the device's filesystem is
+// expected to be unmounted already.
+func (d *Driver) Remove(id string) error {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ if !d.DeviceSet.HasDevice(id) {
+ // Consider removing a non-existing device a no-op
+ // This is useful to be able to progress on container removal
+ // if the underlying device has gone away due to earlier errors
+ return nil
+ }
+
+ // This assumes the device has been properly Get/Put:ed and thus is unmounted
+ if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
+ return fmt.Errorf("failed to remove device %s: %v", id, err)
+ }
+ return system.EnsureRemoveAll(path.Join(d.home, "mnt", id))
+}
+
+// Get mounts a device with the given id and returns the path to its root
+// filesystem.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ mp := path.Join(d.home, "mnt", id)
+ rootFs := path.Join(mp, "rootfs")
+ if count := d.ctr.Increment(mp); count > 1 {
+ return rootFs, nil
+ }
+
+ uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+ if err != nil {
+ d.ctr.Decrement(mp)
+ return "", err
+ }
+
+ // Create the target directories if they don't exist
+ if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
+ d.ctr.Decrement(mp)
+ return "", err
+ }
+ if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
+ d.ctr.Decrement(mp)
+ return "", err
+ }
+
+ // Mount the device
+ if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
+ d.ctr.Decrement(mp)
+ return "", err
+ }
+
+ if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
+ d.ctr.Decrement(mp)
+ d.DeviceSet.UnmountDevice(id, mp)
+ return "", err
+ }
+
+ idFile := path.Join(mp, "id")
+ if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
+ // Create an "id" file with the container/image id in it to help reconstruct this in case
+ // of later problems
+ if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
+ d.ctr.Decrement(mp)
+ d.DeviceSet.UnmountDevice(id, mp)
+ return "", err
+ }
+ }
+
+ return rootFs, nil
+}
+
+// Put unmounts a device once its reference count drops to zero.
+func (d *Driver) Put(id string) error {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ mp := path.Join(d.home, "mnt", id)
+ if count := d.ctr.Decrement(mp); count > 0 {
+ return nil
+ }
+ err := d.DeviceSet.UnmountDevice(id, mp)
+ if err != nil {
+ logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)
+ }
+ return err
+}
+
+// Exists checks to see if the device exists.
+func (d *Driver) Exists(id string) bool {
+ return d.DeviceSet.HasDevice(id)
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go
new file mode 100644
index 000000000..1dc3262d2
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go
@@ -0,0 +1,88 @@
+// +build linux
+
+package devmapper
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/sys/unix"
+)
+
+// FIXME: this is copy-pasted from the aufs driver.
+// It should be moved into the core.
+
+// Mounted returns true if mountpoint is a mount point: it compares the
+// device number of the path with that of its parent directory.
+func Mounted(mountpoint string) (bool, error) {
+ var mntpointSt unix.Stat_t
+ if err := unix.Stat(mountpoint, &mntpointSt); err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+ var parentSt unix.Stat_t
+ if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil {
+ return false, err
+ }
+ return mntpointSt.Dev != parentSt.Dev, nil
+}
+
+type probeData struct {
+ fsName string
+ magic string
+ offset uint64
+}
+
+// ProbeFsType returns the filesystem name for the given device id.
+func ProbeFsType(device string) (string, error) {
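+	// Each probe checks for a known superblock magic value at a fixed
+	// offset: btrfs stores "_BHRfS_M" at 0x10040, ext4 stores 0xEF53
+	// (little-endian bytes "\123\357") at 0x438, and XFS stores "XFSB" at
+	// offset 0.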
+ probes := []probeData{
+ {"btrfs", "_BHRfS_M", 0x10040},
+ {"ext4", "\123\357", 0x438},
+ {"xfs", "XFSB", 0},
+ }
+
+ maxLen := uint64(0)
+ for _, p := range probes {
+ l := p.offset + uint64(len(p.magic))
+ if l > maxLen {
+ maxLen = l
+ }
+ }
+
+ file, err := os.Open(device)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ buffer := make([]byte, maxLen)
+ l, err := file.Read(buffer)
+ if err != nil {
+ return "", err
+ }
+
+ if uint64(l) != maxLen {
+ return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
+ }
+
+ for _, p := range probes {
+ if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
+ return p.fsName, nil
+ }
+ }
+
+ return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
+}
+
+func joinMountOptions(a, b string) string {
+ if a == "" {
+ return b
+ }
+ if b == "" {
+ return a
+ }
+ return a + "," + b
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
new file mode 100644
index 000000000..569964784
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -0,0 +1,285 @@
+package graphdriver
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+)
+
+// FsMagic unsigned id of the filesystem in use.
+type FsMagic uint32
+
+const (
+ // FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
+ FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+ // All registered drivers
+ drivers map[string]InitFunc
+
+ // ErrNotSupported returned when driver is not supported.
+ ErrNotSupported = errors.New("driver not supported")
+ // ErrPrerequisites returned when driver does not meet prerequisites.
+ ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+ // ErrIncompatibleFS returned when file system is not supported.
+ ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
+)
+
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// methods.
+type CreateOpts struct {
+ MountLabel string
+ StorageOpt map[string]string
+}
+
+// InitFunc initializes the storage driver.
+type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
+
+// ProtoDriver defines the basic capabilities of a driver.
+// This interface exists solely to be a minimum set of methods
+// for client code which chooses not to implement the entire Driver
+// interface and uses the NaiveDiffDriver wrapper constructor.
+//
+// Use of ProtoDriver directly by client code is not recommended.
+type ProtoDriver interface {
+ // String returns a string representation of this driver.
+ String() string
+ // CreateReadWrite creates a new, empty filesystem layer that is ready
+ // to be used as the storage for a container. Additional options can
+ // be passed in opts. parent may be "" and opts may be nil.
+ CreateReadWrite(id, parent string, opts *CreateOpts) error
+ // Create creates a new, empty, filesystem layer with the
+ // specified id and parent and options passed in opts. Parent
+ // may be "" and opts may be nil.
+ Create(id, parent string, opts *CreateOpts) error
+ // Remove attempts to remove the filesystem layer with this id.
+ Remove(id string) error
+ // Get returns the mountpoint for the layered filesystem referred
+ // to by this id. You can optionally specify a mountLabel or "".
+ // Returns the absolute path to the mounted layered filesystem.
+ Get(id, mountLabel string) (dir string, err error)
+ // Put releases the system resources for the specified id,
+ // e.g, unmounting layered filesystem.
+ Put(id string) error
+ // Exists returns whether a filesystem layer with the specified
+ // ID exists on this driver.
+ Exists(id string) bool
+ // Status returns a set of key-value pairs which give low
+ // level diagnostic status about this driver.
+ Status() [][2]string
+	// Metadata returns a set of key-value pairs which give low level
+	// information about the image/container the driver is managing.
+ Metadata(id string) (map[string]string, error)
+ // Cleanup performs necessary tasks to release resources
+ // held by the driver, e.g., unmounting all layered filesystems
+ // known to this driver.
+ Cleanup() error
+ // AdditionalImageStores returns additional image stores supported by the driver
+ AdditionalImageStores() []string
+}
+
+// DiffDriver is the interface to use to implement graph diffs
+type DiffDriver interface {
+ // Diff produces an archive of the changes between the specified
+ // layer and its parent layer which may be "".
+ Diff(id, parent string) (io.ReadCloser, error)
+ // Changes produces a list of changes between the specified layer
+ // and its parent layer. If parent is "", then all changes will be ADD changes.
+ Changes(id, parent string) ([]archive.Change, error)
+ // ApplyDiff extracts the changeset from the given diff into the
+ // layer with the specified id and parent, returning the size of the
+ // new layer in bytes.
+ // The io.Reader must be an uncompressed stream.
+ ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+ // DiffSize calculates the changes between the specified id
+ // and its parent and returns the size in bytes of the changes
+ // relative to its base filesystem directory.
+ DiffSize(id, parent string) (size int64, err error)
+}
+
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+ ProtoDriver
+ DiffDriver
+}
+
+// Capabilities defines a list of capabilities a driver may implement.
+// These capabilities are not required; however, they do determine how a
+// graphdriver can be used.
+type Capabilities struct {
+	// ReproducesExactDiffs indicates that this driver is capable of
+	// reproducing exactly equivalent diffs for read-only layers. If set,
+	// clients can rely on the driver for consistent tar streams, and avoid
+	// extra processing to account for potential differences (eg: the layer
+	// store's use of tar-split).
+ ReproducesExactDiffs bool
+}
+
+// CapabilityDriver is the interface for layered file system drivers that
+// can report on their Capabilities.
+type CapabilityDriver interface {
+ Capabilities() Capabilities
+}
+
+// DiffGetterDriver is the interface for layered file system drivers that
+// provide a specialized function for getting file contents for tar-split.
+type DiffGetterDriver interface {
+ Driver
+ // DiffGetter returns an interface to efficiently retrieve the contents
+ // of files in a layer.
+ DiffGetter(id string) (FileGetCloser, error)
+}
+
+// FileGetCloser extends the storage.FileGetter interface with a Close method
+// for cleaning up.
+type FileGetCloser interface {
+ storage.FileGetter
+ // Close cleans up any resources associated with the FileGetCloser.
+ Close() error
+}
+
+// Checker makes checks on specified filesystems.
+type Checker interface {
+ // IsMounted returns true if the provided path is mounted for the specific checker
+ IsMounted(path string) bool
+}
+
+func init() {
+ drivers = make(map[string]InitFunc)
+}
+
+// Register registers an InitFunc for the driver.
+func Register(name string, initFunc InitFunc) error {
+ if _, exists := drivers[name]; exists {
+ return fmt.Errorf("Name already registered %s", name)
+ }
+ drivers[name] = initFunc
+
+ return nil
+}
+
+// GetDriver initializes and returns the registered driver
+func GetDriver(name string, config Options) (Driver, error) {
+ if initFunc, exists := drivers[name]; exists {
+ return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ }
+
+ logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
+ return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root)
+}
+
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
+func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
+ if initFunc, exists := drivers[name]; exists {
+ return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
+ }
+ logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
+ return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
+}
+
+// Options is used to initialize a graphdriver
+type Options struct {
+ Root string
+ DriverOptions []string
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ExperimentalEnabled bool
+}
+
+// New creates the driver and initializes it at the specified root.
+func New(name string, config Options) (Driver, error) {
+ if name != "" {
+ logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
+ return GetDriver(name, config)
+ }
+
+ // Guess for prior driver
+ driversMap := scanPriorDrivers(config.Root)
+ for _, name := range priority {
+ if name == "vfs" {
+ // don't use vfs even if there is state present.
+ continue
+ }
+ if _, prior := driversMap[name]; prior {
+ // of the prior drivers whose state was found, prefer the first one
+ // in our priority order
+ driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ if err != nil {
+ // unlike below, we return the error here, because there is prior
+ // state and the driver is no longer supported, lacks prerequisites,
+ // or is incompatible; something changed and needs attention.
+ // Otherwise the daemon's images would just "disappear".
+ logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+ return nil, err
+ }
+
+ // abort starting when there are other prior configured drivers
+ // to ensure the user explicitly selects the driver to load
+ if len(driversMap) > 1 {
+ var driversSlice []string
+ for name := range driversMap {
+ driversSlice = append(driversSlice, name)
+ }
+
+ return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+ }
+
+ logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+ return driver, nil
+ }
+ }
+
+ // Check for priority drivers first
+ for _, name := range priority {
+ driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ if err != nil {
+ if isDriverNotSupported(err) {
+ continue
+ }
+ return nil, err
+ }
+ return driver, nil
+ }
+
+ // Check all registered drivers if no priority driver is found
+ for name, initFunc := range drivers {
+ driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ if err != nil {
+ if isDriverNotSupported(err) {
+ continue
+ }
+ return nil, err
+ }
+ return driver, nil
+ }
+ return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+ cause := errors.Cause(err)
+ return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an unordered map of the prior storage driver directories found under root
+func scanPriorDrivers(root string) map[string]bool {
+ driversMap := make(map[string]bool)
+
+ for driver := range drivers {
+ p := filepath.Join(root, driver)
+ if _, err := os.Stat(p); err == nil && driver != "vfs" {
+ driversMap[driver] = true
+ }
+ }
+ return driversMap
+}
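+
+// A hedged sketch of the registration flow (hypothetical driver name; real
+// backends do the equivalent, as the overlay driver later in this change does):
+//
+//	// inside a driver package:
+//	func init() {
+//		graphdriver.Register("mydriver", Init) // Init must match the InitFunc signature
+//	}
+//
+//	// inside a caller:
+//	driver, err := graphdriver.New("mydriver", graphdriver.Options{Root: root})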
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
new file mode 100644
index 000000000..53394b738
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -0,0 +1,23 @@
+package graphdriver
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "zfs",
+ }
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(mountPath, &buf); err != nil {
+ return false, err
+ }
+ return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
new file mode 100644
index 000000000..94f7270ea
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -0,0 +1,135 @@
+// +build linux
+
+package graphdriver
+
+import (
+ "path/filepath"
+
+ "github.com/containers/storage/pkg/mount"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // FsMagicAufs filesystem id for Aufs
+ FsMagicAufs = FsMagic(0x61756673)
+ // FsMagicBtrfs filesystem id for Btrfs
+ FsMagicBtrfs = FsMagic(0x9123683E)
+ // FsMagicCramfs filesystem id for Cramfs
+ FsMagicCramfs = FsMagic(0x28cd3d45)
+ // FsMagicEcryptfs filesystem id for eCryptfs
+ FsMagicEcryptfs = FsMagic(0xf15f)
+ // FsMagicExtfs filesystem id for Extfs
+ FsMagicExtfs = FsMagic(0x0000EF53)
+ // FsMagicF2fs filesystem id for F2fs
+ FsMagicF2fs = FsMagic(0xF2F52010)
+ // FsMagicGPFS filesystem id for GPFS
+ FsMagicGPFS = FsMagic(0x47504653)
+ // FsMagicJffs2Fs filesystem id for Jffs2Fs
+ FsMagicJffs2Fs = FsMagic(0x000072b6)
+ // FsMagicJfs filesystem id for Jfs
+ FsMagicJfs = FsMagic(0x3153464a)
+ // FsMagicNfsFs filesystem id for NfsFs
+ FsMagicNfsFs = FsMagic(0x00006969)
+ // FsMagicRAMFs filesystem id for RamFs
+ FsMagicRAMFs = FsMagic(0x858458f6)
+ // FsMagicReiserFs filesystem id for ReiserFs
+ FsMagicReiserFs = FsMagic(0x52654973)
+ // FsMagicSmbFs filesystem id for SmbFs
+ FsMagicSmbFs = FsMagic(0x0000517B)
+ // FsMagicSquashFs filesystem id for SquashFs
+ FsMagicSquashFs = FsMagic(0x73717368)
+ // FsMagicTmpFs filesystem id for TmpFs
+ FsMagicTmpFs = FsMagic(0x01021994)
+ // FsMagicVxFS filesystem id for VxFs
+ FsMagicVxFS = FsMagic(0xa501fcf5)
+ // FsMagicXfs filesystem id for Xfs
+ FsMagicXfs = FsMagic(0x58465342)
+ // FsMagicZfs filesystem id for Zfs
+ FsMagicZfs = FsMagic(0x2fc12fc1)
+ // FsMagicOverlay filesystem id for overlay
+ FsMagicOverlay = FsMagic(0x794C7630)
+)
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "overlay",
+ "devicemapper",
+ "aufs",
+ "btrfs",
+ "zfs",
+ "vfs",
+ }
+
+ // FsNames maps filesystem id to name of the filesystem.
+ FsNames = map[FsMagic]string{
+ FsMagicAufs: "aufs",
+ FsMagicBtrfs: "btrfs",
+ FsMagicCramfs: "cramfs",
+ FsMagicEcryptfs: "ecryptfs",
+ FsMagicExtfs: "extfs",
+ FsMagicF2fs: "f2fs",
+ FsMagicGPFS: "gpfs",
+ FsMagicJffs2Fs: "jffs2",
+ FsMagicJfs: "jfs",
+ FsMagicNfsFs: "nfs",
+ FsMagicOverlay: "overlayfs",
+ FsMagicRAMFs: "ramfs",
+ FsMagicReiserFs: "reiserfs",
+ FsMagicSmbFs: "smb",
+ FsMagicSquashFs: "squashfs",
+ FsMagicTmpFs: "tmpfs",
+ FsMagicUnsupported: "unsupported",
+ FsMagicVxFS: "vxfs",
+ FsMagicXfs: "xfs",
+ FsMagicZfs: "zfs",
+ }
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+ return 0, err
+ }
+ return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+ return &fsChecker{
+ t: t,
+ }
+}
+
+type fsChecker struct {
+ t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+ m, _ := Mounted(c.t, path)
+ return m
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+func NewDefaultChecker() Checker {
+ return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+ m, _ := mount.Mounted(path)
+ return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(mountPath, &buf); err != nil {
+ return false, err
+ }
+ return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
new file mode 100644
index 000000000..174fa9670
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -0,0 +1,96 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+ struct statvfs *buf;
+ int err;
+ buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+ err = statvfs(s, buf);
+ return buf;
+}
+*/
+import "C"
+import (
+ "path/filepath"
+ "unsafe"
+
+ "github.com/containers/storage/pkg/mount"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // FsMagicZfs filesystem id for Zfs
+ FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "zfs",
+ }
+
+ // FsNames maps filesystem id to name of the filesystem.
+ FsNames = map[FsMagic]string{
+ FsMagicZfs: "zfs",
+ }
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ return 0, nil
+}
+
+type fsChecker struct {
+ t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+ m, _ := Mounted(c.t, path)
+ return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+ return &fsChecker{
+ t: t,
+ }
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
+func NewDefaultChecker() Checker {
+ return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+ m, _ := mount.Mounted(path)
+ return m
+}
+
+// Mounted checks if the given path is mounted as the fs type.
+// Solaris supports only ZFS for now.
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+ cs := C.CString(filepath.Dir(mountPath))
+ defer C.free(unsafe.Pointer(cs))
+ buf := C.getstatfs(cs)
+ defer C.free(unsafe.Pointer(buf))
+
+ // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+ if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+ (buf.f_basetype[3] != 0) {
+ logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+ return false, ErrPrerequisites
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
new file mode 100644
index 000000000..4a875608b
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "unsupported",
+ }
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go
new file mode 100644
index 000000000..ffd30c295
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_windows.go
@@ -0,0 +1,14 @@
+package graphdriver
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "windowsfilter",
+ }
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ // Note it is OK to return FsMagicUnsupported on Windows.
+ return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
new file mode 100644
index 000000000..48a1f078f
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -0,0 +1,169 @@
+package graphdriver
+
+import (
+ "io"
+ "time"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ApplyUncompressedLayer defines the unpack method used by the graph
+ // driver.
+ ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
+)
+
+// NaiveDiffDriver takes a ProtoDriver and adds the
+// diffing methods, which the wrapped driver may or may not
+// support on its own. See the comment on the exported
+// NewNaiveDiffDriver function below.
+// Notably, the AUFS driver doesn't need to be wrapped like this.
+type NaiveDiffDriver struct {
+ ProtoDriver
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+}
+
+// NewNaiveDiffDriver returns a fully functional driver that wraps the
+// given ProtoDriver and adds the capability of the following methods which
+// it may or may not support on its own:
+// Diff(id, parent string) (io.ReadCloser, error)
+// Changes(id, parent string) ([]archive.Change, error)
+// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+// DiffSize(id, parent string) (size int64, err error)
+func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
+ return &NaiveDiffDriver{ProtoDriver: driver,
+ uidMaps: uidMaps,
+ gidMaps: gidMaps}
+}
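+
+// A typical use (mirroring what the overlay driver later in this change
+// does in its Init function):
+//
+//	d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)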
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
+ startTime := time.Now()
+ driver := gdw.ProtoDriver
+
+ layerFs, err := driver.Get(id, "")
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err != nil {
+ driver.Put(id)
+ }
+ }()
+
+ if parent == "" {
+ archive, err := archive.Tar(layerFs, archive.Uncompressed)
+ if err != nil {
+ return nil, err
+ }
+ return ioutils.NewReadCloserWrapper(archive, func() error {
+ err := archive.Close()
+ driver.Put(id)
+ return err
+ }), nil
+ }
+
+ parentFs, err := driver.Get(parent, "")
+ if err != nil {
+ return nil, err
+ }
+ defer driver.Put(parent)
+
+ changes, err := archive.ChangesDirs(layerFs, parentFs)
+ if err != nil {
+ return nil, err
+ }
+
+ archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
+ if err != nil {
+ return nil, err
+ }
+
+ return ioutils.NewReadCloserWrapper(archive, func() error {
+ err := archive.Close()
+ driver.Put(id)
+
+ // NaiveDiffDriver compares file metadata with parent layers. Parent layers
+ // are extracted from tars with full-second precision on modification time.
+ // We need this hack to make sure calls within the same second receive
+ // the correct result.
+ time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+ return err
+ }), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
+ driver := gdw.ProtoDriver
+
+ layerFs, err := driver.Get(id, "")
+ if err != nil {
+ return nil, err
+ }
+ defer driver.Put(id)
+
+ parentFs := ""
+
+ if parent != "" {
+ parentFs, err = driver.Get(parent, "")
+ if err != nil {
+ return nil, err
+ }
+ defer driver.Put(parent)
+ }
+
+ return archive.ChangesDirs(layerFs, parentFs)
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+ driver := gdw.ProtoDriver
+
+ // Mount the root filesystem so we can apply the diff/layer.
+ layerFs, err := driver.Get(id, "")
+ if err != nil {
+ return
+ }
+ defer driver.Put(id)
+
+ options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
+ GIDMaps: gdw.gidMaps}
+ start := time.Now().UTC()
+ logrus.Debug("Start untar layer")
+ if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
+ return
+ }
+ logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
+
+ return
+}
+
+// DiffSize calculates the changes between the specified layer
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+ driver := gdw.ProtoDriver
+
+ changes, err := gdw.Changes(id, parent)
+ if err != nil {
+ return
+ }
+
+ layerFs, err := driver.Get(id, "")
+ if err != nil {
+ return
+ }
+ defer driver.Put(id)
+
+ return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
new file mode 100644
index 000000000..2a096edf6
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -0,0 +1,102 @@
+// +build linux
+
+package overlay
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "syscall"
+
+ "github.com/containers/storage/pkg/system"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// doesSupportNativeDiff checks whether the filesystem has a bug
+// that copies up the opaque flag when copying up an opaque
+// directory, or whether the kernel enables CONFIG_OVERLAY_FS_REDIRECT_DIR.
+// When either is present, the naive diff should be used.
+func doesSupportNativeDiff(d string) error {
+ td, err := ioutil.TempDir(d, "opaque-bug-check")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := os.RemoveAll(td); err != nil {
+ logrus.Warnf("Failed to remove check directory %v: %v", td, err)
+ }
+ }()
+
+ // Make directories l1/d, l1/d1, l2/d, l3, work, merged
+ if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
+ return err
+ }
+ if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil {
+ return err
+ }
+ if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+ return err
+ }
+ if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+ return err
+ }
+
+ // Mark l2/d as opaque
+ if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil {
+ return errors.Wrap(err, "failed to set opaque flag on middle layer")
+ }
+
+ opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
+ if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
+ return errors.Wrap(err, "failed to mount overlay")
+ }
+ defer func() {
+ if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
+ logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
+ }
+ }()
+
+ // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
+ if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
+ return errors.Wrap(err, "failed to write to merged directory")
+ }
+
+ // Check l3/d does not have opaque flag
+ xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque")
+ if err != nil {
+ return errors.Wrap(err, "failed to read opaque flag on upper layer")
+ }
+ if string(xattrOpaque) == "y" {
+ return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
+ }
+
+ // rename "d1" to "d2"
+ if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil {
+ // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
+ if err.(*os.LinkError).Err == syscall.EXDEV {
+ return nil
+ }
+ return errors.Wrap(err, "failed to rename dir in merged directory")
+ }
+ // get the xattr of "d2"
+ xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect")
+ if err != nil {
+ return errors.Wrap(err, "failed to read redirect flag on upper layer")
+ }
+
+ if string(xattrRedirect) == "d1" {
+ return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go
new file mode 100644
index 000000000..feb039592
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go
@@ -0,0 +1,89 @@
+// +build linux
+
+package overlay
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+
+ "github.com/containers/storage/pkg/reexec"
+ "golang.org/x/sys/unix"
+)
+
+func init() {
+ reexec.Register("storage-mountfrom", mountFromMain)
+}
+
+func fatal(err error) {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+}
+
+type mountOptions struct {
+ Device string
+ Target string
+ Type string
+ Label string
+ Flag uint32
+}
+
+func mountFrom(dir, device, target, mType string, flags uintptr, label string) error {
+ options := &mountOptions{
+ Device: device,
+ Target: target,
+ Type: mType,
+ Flag: uint32(flags),
+ Label: label,
+ }
+
+ cmd := reexec.Command("storage-mountfrom", dir)
+ w, err := cmd.StdinPipe()
+ if err != nil {
+ return fmt.Errorf("mountfrom error on pipe creation: %v", err)
+ }
+
+ output := bytes.NewBuffer(nil)
+ cmd.Stdout = output
+ cmd.Stderr = output
+ if err := cmd.Start(); err != nil {
+ w.Close()
+ return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
+ }
+ // write the options to the pipe for the re-exec'd mount process to read
+ if err := json.NewEncoder(w).Encode(options); err != nil {
+ w.Close()
+ return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
+ }
+ w.Close()
+
+ if err := cmd.Wait(); err != nil {
+ return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
+ }
+ return nil
+}
+
+// mountFromMain is the entry-point for storage-mountfrom on re-exec.
+func mountFromMain() {
+ runtime.LockOSThread()
+ flag.Parse()
+
+ var options *mountOptions
+
+ if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+ fatal(err)
+ }
+
+ if err := os.Chdir(flag.Arg(0)); err != nil {
+ fatal(err)
+ }
+
+ if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
new file mode 100644
index 000000000..ae601f431
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -0,0 +1,775 @@
+// +build linux
+
+package overlay
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/drivers/overlayutils"
+ "github.com/containers/storage/drivers/quota"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/fsutils"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/locker"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/parsers"
+ "github.com/containers/storage/pkg/parsers/kernel"
+ "github.com/containers/storage/pkg/system"
+ units "github.com/docker/go-units"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // untar defines the untar method
+ untar = chrootarchive.UntarUncompressed
+)
+
+// This backend uses the overlay union filesystem for containers
+// with diff directories for each layer.
+
+// This version of the overlay driver requires at least kernel
+// 4.0.0 in order to support mounting multiple diff directories.
+
+// Each container/image has at least a "diff" directory and "link" file.
+// There is also a "lower" file when there are diff layers
+// below, as well as "merged" and "work" directories. The "diff" directory
+// has the upper layer of the overlay and is used to capture any
+// changes to the layer. The "lower" file contains all the lower layer
+// mounts separated by ":" and ordered from uppermost to lowermost
+// layers. The overlay itself is mounted in the "merged" directory,
+// and the "work" dir is needed for overlay to work.
+
+// The "link" file for each layer contains a unique string for the layer.
+// Under the "l" directory at the root there will be a symbolic link
+// with that unique string pointing to the "diff" directory for the layer.
+// The symbolic links are used to reference lower layers in the "lower"
+// file and on mount. The links are used to shorten the total length
+// of a layer reference without requiring changes to the layer identifier
+// or root directory. Mounts are always done relative to root and
+// referencing the symbolic links in order to ensure the number of
+// lower directories can fit in a single page for making the mount
+// syscall. A hard upper limit of 128 lower layers is enforced to ensure
+// that mounts do not fail due to length.
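+
+// As an illustrative sketch (ids and link names are shortened and
+// hypothetical), the on-disk layout for a layer with a single parent
+// might look like:
+//
+//	<home>/
+//	  l/
+//	    ABCDEF -> ../<parent-id>/diff
+//	    GHIJKL -> ../<layer-id>/diff
+//	  <layer-id>/
+//	    diff/    upper layer, captures changes
+//	    link     contains "GHIJKL"
+//	    lower    contains "l/ABCDEF"
+//	    merged/  mount point for the overlay
+//	    work/    overlay work directory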
+
+const (
+ linkDir = "l"
+ lowerFile = "lower"
+ maxDepth = 128
+
+ // idLength represents the number of random characters
+ // which can be used to create the unique link identifier
+ // for every layer. If this value is too long then the
+ // page size limit for the mount command may be exceeded.
+ // The idLength should be selected such that the following
+ // inequality holds (512 is a buffer for label metadata):
+ // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
+ idLength = 26
+)
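+
+// A quick check of that inequality, assuming the common 4096-byte page size:
+// (26 + len("l") + 1) * 128 = 28 * 128 = 3584 <= 4096 - 512 = 3584,
+// so idLength = 26 sits exactly at the limit.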
+
+type overlayOptions struct {
+ overrideKernelCheck bool
+ imageStores []string
+ quota quota.Quota
+}
+
+// Driver contains information about the home directory and the list of active mounts that are created using this driver.
+type Driver struct {
+ name string
+ home string
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+ quotaCtl *quota.Control
+ options overlayOptions
+ naiveDiff graphdriver.DiffDriver
+ supportsDType bool
+ locker *locker.Locker
+}
+
+var (
+ backingFs = "<unknown>"
+ projectQuotaSupported = false
+
+ useNaiveDiffLock sync.Once
+ useNaiveDiffOnly bool
+)
+
+func init() {
+ graphdriver.Register("overlay", Init)
+ graphdriver.Register("overlay2", Init)
+}
+
+// Init returns a native diff driver for the overlay filesystem.
+// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned.
+// If overlay is not supported over the backing filesystem, graphdriver.ErrIncompatibleFS is returned.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+ opts, err := parseOptions(options)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := supportsOverlay(); err != nil {
+ return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs")
+ }
+
+ // require kernel 4.0.0 to ensure multiple lower dirs are supported
+ v, err := kernel.GetKernelVersion()
+ if err != nil {
+ return nil, err
+ }
+ if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
+ if !opts.overrideKernelCheck {
+ return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
+ }
+ logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
+ }
+
+ fsMagic, err := graphdriver.GetFSMagic(home)
+ if err != nil {
+ return nil, err
+ }
+ if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+ backingFs = fsName
+ }
+
+ // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
+ switch fsMagic {
+ case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+ logrus.Errorf("'overlay' is not supported over %s", backingFs)
+ return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
+ case graphdriver.FsMagicBtrfs:
+ // Support for OverlayFS on BTRFS was added in kernel 4.7
+ // See https://btrfs.wiki.kernel.org/index.php/Changelog
+ if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 {
+ if !opts.overrideKernelCheck {
+ logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs)
+ return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs)
+ }
+ logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update")
+ }
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+ // Create the driver home dir
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+
+ if err := mount.MakePrivate(home); err != nil {
+ return nil, err
+ }
+
+ supportsDType, err := fsutils.SupportsDType(home)
+ if err != nil {
+ return nil, err
+ }
+ if !supportsDType {
+ logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
+ // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4
+ // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
+ }
+
+ d := &Driver{
+ name: "overlay",
+ home: home,
+ uidMaps: uidMaps,
+ gidMaps: gidMaps,
+ ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
+ supportsDType: supportsDType,
+ locker: locker.New(),
+ options: *opts,
+ }
+
+ d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)
+
+ if backingFs == "xfs" {
+ // Try to enable project quota support over xfs.
+ if d.quotaCtl, err = quota.NewControl(home); err == nil {
+ projectQuotaSupported = true
+ } else if opts.quota.Size > 0 {
+ return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
+ }
+ } else if opts.quota.Size > 0 {
+ // if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
+ return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. Found %v", backingFs)
+ }
+
+ logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)
+
+ return d, nil
+}
+
+func parseOptions(options []string) (*overlayOptions, error) {
+ o := &overlayOptions{}
+ for _, option := range options {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "overlay.override_kernel_check", "overlay2.override_kernel_check":
+ logrus.Debugf("overlay: override_kernelcheck=%s", val)
+ o.overrideKernelCheck, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+ case "overlay.size", "overlay2.size":
+ logrus.Debugf("overlay: size=%s", val)
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ o.quota.Size = uint64(size)
+ case "overlay.imagestore", "overlay2.imagestore":
+ logrus.Debugf("overlay: imagestore=%s", val)
+ // Additional read only image stores to use for lower paths
+ for _, store := range strings.Split(val, ",") {
+ store = filepath.Clean(store)
+ if !filepath.IsAbs(store) {
+ return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store)
+ }
+ st, err := os.Stat(store)
+ if err != nil {
+ return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err)
+ }
+ if !st.IsDir() {
+ return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
+ }
+ o.imageStores = append(o.imageStores, store)
+ }
+ default:
+ return nil, fmt.Errorf("overlay: Unknown option %s", key)
+ }
+ }
+ return o, nil
+}
+
+func supportsOverlay() error {
+ // Try to load the overlay module before checking
+ // /proc/filesystems to see whether overlay is supported
+ exec.Command("modprobe", "overlay").Run()
+
+ f, err := os.Open("/proc/filesystems")
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if s.Text() == "nodev\toverlay" {
+ return nil
+ }
+ }
+ logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+ return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+}
+
+func useNaiveDiff(home string) bool {
+ useNaiveDiffLock.Do(func() {
+ if err := doesSupportNativeDiff(home); err != nil {
+ logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
+ useNaiveDiffOnly = true
+ }
+ })
+ return useNaiveDiffOnly
+}
+
+func (d *Driver) String() string {
+ return d.name
+}
+
+// Status returns current driver information in a two-dimensional string array,
+// including the "Backing Filesystem" used by this implementation.
+func (d *Driver) Status() [][2]string {
+ return [][2]string{
+ {"Backing Filesystem", backingFs},
+ {"Supports d_type", strconv.FormatBool(d.supportsDType)},
+ {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))},
+ }
+}
+
+// Metadata returns metadata about the overlay driver, such as the
+// LowerDir, UpperDir, WorkDir, and MergedDir used to store data.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+ dir := d.dir(id)
+ if _, err := os.Stat(dir); err != nil {
+ return nil, err
+ }
+
+ metadata := map[string]string{
+ "WorkDir": path.Join(dir, "work"),
+ "MergedDir": path.Join(dir, "merged"),
+ "UpperDir": path.Join(dir, "diff"),
+ }
+
+ lowerDirs, err := d.getLowerDirs(id)
+ if err != nil {
+ return nil, err
+ }
+ if len(lowerDirs) > 0 {
+ metadata["LowerDir"] = strings.Join(lowerDirs, ":")
+ }
+
+ return metadata, nil
+}
+
+// Cleanup cleans up any state created by overlay that should be removed
+// when the daemon is shut down. For now, we just have to unmount the
+// bind mount we created.
+func (d *Driver) Cleanup() error {
+ return mount.Unmount(d.home)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported {
+ return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option")
+ }
+
+ if opts == nil {
+ opts = &graphdriver.CreateOpts{
+ StorageOpt: map[string]string{},
+ }
+ }
+
+ if _, ok := opts.StorageOpt["size"]; !ok {
+ if opts.StorageOpt == nil {
+ opts.StorageOpt = map[string]string{}
+ }
+ opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
+ }
+
+ return d.create(id, parent, opts)
+}
+
+// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
+// The parent filesystem is used to configure these directories for the overlay.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
+ if opts != nil && len(opts.StorageOpt) != 0 {
+ if _, ok := opts.StorageOpt["size"]; ok {
+ return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
+ }
+ }
+ return d.create(id, parent, opts)
+}
+
+func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
+ dir := d.dir(id)
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+ if err != nil {
+ return err
+ }
+ if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
+ return err
+ }
+ if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
+ return err
+ }
+
+ defer func() {
+ // Clean up on failure
+ if retErr != nil {
+ os.RemoveAll(dir)
+ }
+ }()
+
+ if opts != nil && len(opts.StorageOpt) > 0 {
+ driver := &Driver{}
+ if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil {
+ return err
+ }
+
+ if driver.options.quota.Size > 0 {
+ // Set container disk quota limit
+ if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
+ return err
+ }
+
+ lid := generateID(idLength)
+ if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
+ return err
+ }
+
+ // Write link id to link file
+ if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
+ return err
+ }
+
+ // if no parent directory, done
+ if parent == "" {
+ return nil
+ }
+
+ if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
+ return err
+ }
+ if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
+ return err
+ }
+
+ lower, err := d.getLower(parent)
+ if err != nil {
+ return err
+ }
+ if lower != "" {
+ if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Parse overlay storage options
+func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
+ // Read size to set the disk project quota per container
+ for key, val := range storageOpt {
+ key := strings.ToLower(key)
+ switch key {
+ case "size":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return err
+ }
+ driver.options.quota.Size = uint64(size)
+ default:
+ return fmt.Errorf("Unknown option %s", key)
+ }
+ }
+
+ return nil
+}
+
+func (d *Driver) getLower(parent string) (string, error) {
+ parentDir := d.dir(parent)
+
+ // Ensure parent exists
+ if _, err := os.Lstat(parentDir); err != nil {
+ return "", err
+ }
+
+ // Read the parent's link file
+ parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
+ if err != nil {
+ return "", err
+ }
+ lowers := []string{path.Join(linkDir, string(parentLink))}
+
+ parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
+ if err == nil {
+ parentLowers := strings.Split(string(parentLower), ":")
+ lowers = append(lowers, parentLowers...)
+ }
+ if len(lowers) > maxDepth {
+ return "", errors.New("max depth exceeded")
+ }
+ return strings.Join(lowers, ":"), nil
+}
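+
+// For example (hypothetical link ids): if the parent's link file contains
+// "AAA" and the parent's lower file contains "l/BBB", the returned lower
+// string is "l/AAA:l/BBB", ordered from uppermost to lowermost layer.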
+
+func (d *Driver) dir(id string) string {
+ newpath := path.Join(d.home, id)
+ if _, err := os.Stat(newpath); err != nil {
+ for _, p := range d.AdditionalImageStores() {
+ l := path.Join(p, d.name, id)
+ _, err = os.Stat(l)
+ if err == nil {
+ return l
+ }
+ }
+ }
+ return newpath
+}
+
+func (d *Driver) getLowerDirs(id string) ([]string, error) {
+ var lowersArray []string
+ lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
+ if err == nil {
+ for _, s := range strings.Split(string(lowers), ":") {
+ lower := d.dir(s)
+ lp, err := os.Readlink(lower)
+ if err != nil {
+ return nil, err
+ }
+ lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp))))
+ }
+ } else if !os.IsNotExist(err) {
+ return nil, err
+ }
+ return lowersArray, nil
+}
+
+// Remove cleans the directories that are created for this id.
+func (d *Driver) Remove(id string) error {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ dir := d.dir(id)
+ lid, err := ioutil.ReadFile(path.Join(dir, "link"))
+ if err == nil {
+ if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
+ logrus.Debugf("Failed to remove link: %v", err)
+ }
+ }
+
+ if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return nil
+}
+
+// Get creates and mounts the required file system for the given id and returns the mount path.
+func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ dir := d.dir(id)
+ if _, err := os.Stat(dir); err != nil {
+ return "", err
+ }
+
+ diffDir := path.Join(dir, "diff")
+ lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
+ if err != nil {
+ // If no lower, just return diff directory
+ if os.IsNotExist(err) {
+ return diffDir, nil
+ }
+ return "", err
+ }
+
+ newlowers := ""
+ for _, l := range strings.Split(string(lowers), ":") {
+ lower := ""
+ newpath := path.Join(d.home, l)
+ if _, err := os.Stat(newpath); err != nil {
+ for _, p := range d.AdditionalImageStores() {
+ lower = path.Join(p, d.name, l)
+ if _, err2 := os.Stat(lower); err2 == nil {
+ break
+ }
+ lower = ""
+ }
+ if lower == "" {
+ return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
+ }
+ } else {
+ lower = newpath
+ }
+ if newlowers == "" {
+ newlowers = lower
+ } else {
+ newlowers = newlowers + ":" + lower
+ }
+ }
+
+ mergedDir := path.Join(dir, "merged")
+ if count := d.ctr.Increment(mergedDir); count > 1 {
+ return mergedDir, nil
+ }
+ defer func() {
+ if retErr != nil {
+ if c := d.ctr.Decrement(mergedDir); c <= 0 {
+ if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
+ logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
+ }
+ }
+ }
+ }()
+
+ workDir := path.Join(dir, "work")
+ opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, diffDir, workDir)
+ mountData := label.FormatMountLabel(opts, mountLabel)
+ mount := unix.Mount
+ mountTarget := mergedDir
+
+ pageSize := unix.Getpagesize()
+
+ // Use relative paths and mountFrom when the mount data has exceeded
+ // the page size. The mount syscall fails if the mount data cannot
+ // fit within a page and relative links make the mount data much
+ // smaller at the expense of requiring a fork exec to chroot.
+ if len(mountData) > pageSize {
+ //FIXME: We need to figure out how to get this to work with additional stores
+ opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
+ mountData = label.FormatMountLabel(opts, mountLabel)
+ if len(mountData) > pageSize {
+ return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
+ }
+
+ mount = func(source string, target string, mType string, flags uintptr, label string) error {
+ return mountFrom(d.home, source, target, mType, flags, label)
+ }
+ mountTarget = path.Join(id, "merged")
+ }
+ if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
+ return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
+ }
+
+ // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+ // user namespace requires this to move a directory from lower to upper.
+ rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+ if err != nil {
+ return "", err
+ }
+
+ if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+ return "", err
+ }
+
+ return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ dir := d.dir(id)
+ if _, err := os.Stat(dir); err != nil {
+ return err
+ }
+ mountpoint := path.Join(d.dir(id), "merged")
+ if count := d.ctr.Decrement(mountpoint); count > 0 {
+ return nil
+ }
+ if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil {
+ // If no lower, we used the diff directory, so no work to do
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+ logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+ }
+ return nil
+}
+
+// Exists reports whether a directory exists for the given id.
+func (d *Driver) Exists(id string) bool {
+ _, err := os.Stat(d.dir(id))
+ return err == nil
+}
+
+// isParent returns whether the passed-in parent is the direct parent of the passed-in layer
+func (d *Driver) isParent(id, parent string) bool {
+ lowers, err := d.getLowerDirs(id)
+ if err != nil {
+ return false
+ }
+ if parent == "" && len(lowers) > 0 {
+ return false
+ }
+
+ parentDir := d.dir(parent)
+ var ld string
+ if len(lowers) > 0 {
+ ld = filepath.Dir(lowers[0])
+ }
+ if ld == "" && parent == "" {
+ return true
+ }
+ return ld == parentDir
+}
+
+// ApplyDiff applies the new layer into a root
+func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+ if !d.isParent(id, parent) {
+ return d.naiveDiff.ApplyDiff(id, parent, diff)
+ }
+
+ applyDir := d.getDiffPath(id)
+
+ logrus.Debugf("Applying tar in %s", applyDir)
+ // Overlay doesn't need the parent id to apply the diff
+ if err := untar(diff, applyDir, &archive.TarOptions{
+ UIDMaps: d.uidMaps,
+ GIDMaps: d.gidMaps,
+ WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ }); err != nil {
+ return 0, err
+ }
+
+ return directory.Size(applyDir)
+}
+
+func (d *Driver) getDiffPath(id string) string {
+ dir := d.dir(id)
+
+ return path.Join(dir, "diff")
+}
+
+// DiffSize calculates the changes between the specified id
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+ if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+ return d.naiveDiff.DiffSize(id, parent)
+ }
+ return directory.Size(d.getDiffPath(id))
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+ if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+ return d.naiveDiff.Diff(id, parent)
+ }
+
+ diffPath := d.getDiffPath(id)
+ logrus.Debugf("Tar with options on %s", diffPath)
+ return archive.TarWithOptions(diffPath, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ UIDMaps: d.uidMaps,
+ GIDMaps: d.gidMaps,
+ WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ })
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+ if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+ return d.naiveDiff.Changes(id, parent)
+ }
+ // Overlay doesn't have snapshots, so we need to get changes from all parent
+ // layers.
+ diffPath := d.getDiffPath(id)
+ layers, err := d.getLowerDirs(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return archive.OverlayChanges(layers, diffPath)
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ return d.options.imageStores
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
new file mode 100644
index 000000000..3dbb4de44
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package overlay
diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
new file mode 100644
index 000000000..fc565ef0b
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
@@ -0,0 +1,81 @@
+// +build linux
+
+package overlay
+
+import (
+ "crypto/rand"
+ "encoding/base32"
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// generateID creates a new random string identifier with the given length
+func generateID(l int) string {
+ const (
+ // ensures we back off for less than 450ms total. Use the following to
+ // select a new value, in units of 10ms:
+ // n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
+ maxretries = 9
+ backoff = time.Millisecond * 10
+ )
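+
+ // Worked example of the formula above: with n = maxretries = 9,
+ // n*(n+1)/2 = 45 units of 10ms, so total backoff stays within 450ms.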
+
+ var (
+ totalBackoff time.Duration
+ count int
+ retries int
+ size = (l*5 + 7) / 8
+ u = make([]byte, size)
+ )
+ // TODO: Include time component, counter component, random component
+
+ for {
+ // This should never block but the read may fail. Because of this,
+ // we just try to read the random number generator until we get
+ // something. This is a very rare condition but may happen.
+ b := time.Duration(retries) * backoff
+ time.Sleep(b)
+ totalBackoff += b
+
+ n, err := io.ReadFull(rand.Reader, u[count:])
+ if err != nil {
+ if retryOnError(err) && retries < maxretries {
+ count += n
+ retries++
+ logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
+ continue
+ }
+
+ // Any other errors represent a system problem. What did someone
+ // do to /dev/urandom?
+ panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+ }
+
+ break
+ }
+
+ s := base32.StdEncoding.EncodeToString(u)
+
+ return s[:l]
+}
+
+// retryOnError tries to detect whether or not retrying would be fruitful.
+func retryOnError(err error) bool {
+ switch err := err.(type) {
+ case *os.PathError:
+ return retryOnError(err.Err) // unpack the target error
+ case syscall.Errno:
+ if err == unix.EPERM {
+ // EPERM represents entropy pool exhaustion, a condition under
+ // which we back off and retry.
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
new file mode 100644
index 000000000..467733647
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
@@ -0,0 +1,18 @@
+// +build linux
+
+package overlayutils
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
+func ErrDTypeNotSupported(driver, backingFs string) error {
+ msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
+ if backingFs == "xfs" {
+ msg += " Reformat the filesystem with ftype=1 to enable d_type support."
+ }
+ msg += " Running without d_type is not supported."
+ return errors.New(msg)
+}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
new file mode 100644
index 000000000..93e744371
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -0,0 +1,337 @@
+// +build linux
+
+//
+// projectquota.go - implements XFS project quota controls
+// for setting quota limits on a newly created directory.
+// It currently supports the legacy XFS specific ioctls.
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+ __u32 fsx_xflags;
+ __u32 fsx_extsize;
+ __u32 fsx_nextents;
+ __u32 fsx_projid;
+ unsigned char fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT 0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA 2
+#endif
+#ifndef XFS_PROJ_QUOTA
+#define XFS_PROJ_QUOTA 2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+import (
+ "fmt"
+ "io/ioutil"
+ "path"
+ "path/filepath"
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// Quota limit params - currently we only control blocks hard limit
+type Quota struct {
+ Size uint64
+}
+
+// Control - Context to be used by a storage driver (e.g. overlay)
+// that wants to apply project quotas to container dirs
+type Control struct {
+ backingFsBlockDev string
+ nextProjectID uint32
+ quotas map[string]uint32
+}
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find
+// the first project id to be used for the next container create.
+//
+// Returns nil (and error) if project quota is not supported.
+//
+// First get the project id of the home directory.
+// This test will fail if the backing fs is not xfs.
+//
+// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
+// echo 999:/var/lib/containers/storage/overlay >> /etc/projects
+// echo storage:999 >> /etc/projid
+// xfs_quota -x -c 'project -s storage' /<xfs mount point>
+//
+// In that case, the home directory project id will be used as a "start offset"
+// and all containers will be assigned larger project ids (e.g. >= 1000).
+// This is a way to prevent xfs_quota management from conflicting with containers/storage.
+//
+// Then try to create a test directory with the next project id and set a quota
+// on it. If that works, continue to scan existing containers to map allocated
+// project ids.
+//
+func NewControl(basePath string) (*Control, error) {
+ //
+ // Get project id of parent dir as minimal id to be used by driver
+ //
+ minProjectID, err := getProjectID(basePath)
+ if err != nil {
+ return nil, err
+ }
+ minProjectID++
+
+ //
+ // create backing filesystem device node
+ //
+ backingFsBlockDev, err := makeBackingFsDev(basePath)
+ if err != nil {
+ return nil, err
+ }
+
+ //
+ // Test if filesystem supports project quotas by trying to set
+ // a quota on the first available project id
+ //
+ quota := Quota{
+ Size: 0,
+ }
+ if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
+ return nil, err
+ }
+
+ q := Control{
+ backingFsBlockDev: backingFsBlockDev,
+ nextProjectID: minProjectID + 1,
+ quotas: make(map[string]uint32),
+ }
+
+ //
+ // get first project id to be used for next container
+ //
+ err = q.findNextProjectID(basePath)
+ if err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
+ return &q, nil
+}
+
+// SetQuota - assign a unique project id to directory and set the quota limits
+// for that project id
+func (q *Control) SetQuota(targetPath string, quota Quota) error {
+
+ projectID, ok := q.quotas[targetPath]
+ if !ok {
+ projectID = q.nextProjectID
+
+ //
+ // assign project id to new container directory
+ //
+ err := setProjectID(targetPath, projectID)
+ if err != nil {
+ return err
+ }
+
+ q.quotas[targetPath] = projectID
+ q.nextProjectID++
+ }
+
+ //
+ // set the quota limit for the container's project id
+ //
+ logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
+ return setProjectQuota(q.backingFsBlockDev, projectID, quota)
+}
+
+// setProjectQuota - set the quota for project id on xfs block device
+func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
+ var d C.fs_disk_quota_t
+ d.d_version = C.FS_DQUOT_VERSION
+ d.d_id = C.__u32(projectID)
+ d.d_flags = C.XFS_PROJ_QUOTA
+
+ d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
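+ // XFS expresses block quota limits in 512-byte basic blocks, hence the
+ // division by 512 here (GetQuota below multiplies by 512 to convert back
+ // to bytes).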
+ d.d_blk_hardlimit = C.__u64(quota.Size / 512)
+ d.d_blk_softlimit = d.d_blk_hardlimit
+
+ var cs = C.CString(backingFsBlockDev)
+ defer C.free(unsafe.Pointer(cs))
+
+ _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
+ uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
+ uintptr(unsafe.Pointer(&d)), 0, 0)
+ if errno != 0 {
+ return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
+ projectID, backingFsBlockDev, errno.Error())
+ }
+
+ return nil
+}
+
+// GetQuota - get the quota limits of a directory that was configured with SetQuota
+func (q *Control) GetQuota(targetPath string, quota *Quota) error {
+
+ projectID, ok := q.quotas[targetPath]
+ if !ok {
+ return fmt.Errorf("quota not found for path : %s", targetPath)
+ }
+
+ //
+ // get the quota limit for the container's project id
+ //
+ var d C.fs_disk_quota_t
+
+ var cs = C.CString(q.backingFsBlockDev)
+ defer C.free(unsafe.Pointer(cs))
+
+ _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
+ uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
+ uintptr(unsafe.Pointer(&d)), 0, 0)
+ if errno != 0 {
+ return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
+ projectID, q.backingFsBlockDev, errno.Error())
+ }
+ quota.Size = uint64(d.d_blk_hardlimit) * 512
+
+ return nil
+}
+
+// getProjectID - get the project id of path on xfs
+func getProjectID(targetPath string) (uint32, error) {
+ dir, err := openDir(targetPath)
+ if err != nil {
+ return 0, err
+ }
+ defer closeDir(dir)
+
+ var fsx C.struct_fsxattr
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+ }
+
+ return uint32(fsx.fsx_projid), nil
+}
+
+// setProjectID - set the project id of path on xfs
+func setProjectID(targetPath string, projectID uint32) error {
+ dir, err := openDir(targetPath)
+ if err != nil {
+ return err
+ }
+ defer closeDir(dir)
+
+ var fsx C.struct_fsxattr
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+ }
+ fsx.fsx_projid = C.__u32(projectID)
+ fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
+ _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
+ uintptr(unsafe.Pointer(&fsx)))
+ if errno != 0 {
+ return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
+ }
+
+ return nil
+}
+
+// findNextProjectID - find the next project id to be used for containers
+// by scanning driver home directory to find used project ids
+func (q *Control) findNextProjectID(home string) error {
+ files, err := ioutil.ReadDir(home)
+ if err != nil {
+		return fmt.Errorf("failed to read directory %s: %v", home, err)
+ }
+ for _, file := range files {
+ if !file.IsDir() {
+ continue
+ }
+ path := filepath.Join(home, file.Name())
+ projid, err := getProjectID(path)
+ if err != nil {
+ return err
+ }
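+		// Record the existing mapping and track the highest project ID seen
+		// so that the next allocation does not collide with it.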
+ if projid > 0 {
+ q.quotas[path] = projid
+ }
+ if q.nextProjectID <= projid {
+ q.nextProjectID = projid + 1
+ }
+ }
+
+ return nil
+}
+
+func free(p *C.char) {
+ C.free(unsafe.Pointer(p))
+}
+
+func openDir(path string) (*C.DIR, error) {
+ Cpath := C.CString(path)
+ defer free(Cpath)
+
+ dir := C.opendir(Cpath)
+ if dir == nil {
+		return nil, fmt.Errorf("failed to open dir %s", path)
+ }
+ return dir, nil
+}
+
+func closeDir(dir *C.DIR) {
+ if dir != nil {
+ C.closedir(dir)
+ }
+}
+
+func getDirFd(dir *C.DIR) uintptr {
+ return uintptr(C.dirfd(dir))
+}
+
+// Get the backing block device of the driver home directory
+// and create a block device node under the home directory
+// to be used by quotactl commands
+func makeBackingFsDev(home string) (string, error) {
+ var stat unix.Stat_t
+ if err := unix.Stat(home, &stat); err != nil {
+ return "", err
+ }
+
+ backingFsBlockDev := path.Join(home, "backingFsBlockDev")
+ // Re-create just in case someone copied the home directory over to a new device
+ unix.Unlink(backingFsBlockDev)
+ if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
+ return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
+ }
+
+ return backingFsBlockDev, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go
new file mode 100644
index 000000000..7743dcedb
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go
@@ -0,0 +1,8 @@
+// +build !exclude_graphdriver_aufs,linux
+
+package register
+
+import (
+ // register the aufs graphdriver
+ _ "github.com/containers/storage/drivers/aufs"
+)
diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
new file mode 100644
index 000000000..40ff1cdd0
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
@@ -0,0 +1,8 @@
+// +build !exclude_graphdriver_btrfs,linux
+
+package register
+
+import (
+ // register the btrfs graphdriver
+ _ "github.com/containers/storage/drivers/btrfs"
+)
diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go
new file mode 100644
index 000000000..08ac984b0
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go
@@ -0,0 +1,8 @@
+// +build !exclude_graphdriver_devicemapper,linux
+
+package register
+
+import (
+ // register the devmapper graphdriver
+ _ "github.com/containers/storage/drivers/devmapper"
+)
diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go
new file mode 100644
index 000000000..2d61219bb
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go
@@ -0,0 +1,8 @@
+// +build !exclude_graphdriver_overlay,linux
+
+package register
+
+import (
+ // register the overlay graphdriver
+ _ "github.com/containers/storage/drivers/overlay"
+)
diff --git a/vendor/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/github.com/containers/storage/drivers/register/register_vfs.go
new file mode 100644
index 000000000..691ce8592
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/register/register_vfs.go
@@ -0,0 +1,6 @@
+package register
+
+import (
+ // register vfs
+ _ "github.com/containers/storage/drivers/vfs"
+)
diff --git a/vendor/github.com/containers/storage/drivers/register/register_windows.go b/vendor/github.com/containers/storage/drivers/register/register_windows.go
new file mode 100644
index 000000000..048b27097
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/register/register_windows.go
@@ -0,0 +1,6 @@
+package register
+
+import (
+ // register the windows graph driver
+ _ "github.com/containers/storage/drivers/windows"
+)
diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
new file mode 100644
index 000000000..c748468e5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
@@ -0,0 +1,8 @@
+// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
+
+package register
+
+import (
+ // register the zfs driver
+ _ "github.com/containers/storage/drivers/zfs"
+)
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
new file mode 100644
index 000000000..cf8eca914
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -0,0 +1,155 @@
+package vfs
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+ "github.com/opencontainers/selinux/go-selinux/label"
+)
+
+var (
+ // CopyWithTar defines the copy method to use.
+ CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
+)
+
+func init() {
+ graphdriver.Register("vfs", Init)
+}
+
+// Init returns a new VFS driver.
+// This sets the home directory for the driver and returns a NaiveDiffDriver wrapping it.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+ d := &Driver{
+ homes: []string{home},
+ idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
+ }
+ rootIDs := d.idMappings.RootPair()
+ if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
+ return nil, err
+ }
+ for _, option := range options {
+ if strings.HasPrefix(option, "vfs.imagestore=") {
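+			// Strip the "vfs.imagestore=" prefix (15 bytes) and treat the rest as a comma-separated list of additional image stores.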
+ d.homes = append(d.homes, strings.Split(option[15:], ",")...)
+ }
+ }
+ return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+}
+
+// Driver holds information about the driver, such as its home directories.
+// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations.
+// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support.
+// Driver must be wrapped in a NaiveDiffDriver to be used as a graphdriver.Driver.
+type Driver struct {
+ homes []string
+ idMappings *idtools.IDMappings
+}
+
+func (d *Driver) String() string {
+ return "vfs"
+}
+
+// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information.
+func (d *Driver) Status() [][2]string {
+ return nil
+}
+
+// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any metadata.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+ return nil, nil
+}
+
+// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
+func (d *Driver) Cleanup() error {
+ return nil
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ return d.Create(id, parent, opts)
+}
+
+// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+ if opts != nil && len(opts.StorageOpt) != 0 {
+ return fmt.Errorf("--storage-opt is not supported for vfs")
+ }
+
+ dir := d.dir(id)
+ rootIDs := d.idMappings.RootPair()
+ if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil {
+ return err
+ }
+ if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil {
+ return err
+ }
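+	// Apply a default SELinux level to the new layer directory; labeling
+	// errors are silently ignored on systems without SELinux support.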
+ labelOpts := []string{"level:s0"}
+ if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
+ label.SetFileLabel(dir, mountLabel)
+ }
+ if parent == "" {
+ return nil
+ }
+ parentDir, err := d.Get(parent, "")
+ if err != nil {
+ return fmt.Errorf("%s: %s", parent, err)
+ }
+ return CopyWithTar(parentDir, dir)
+}
+
+func (d *Driver) dir(id string) string {
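+	// Search the primary home and any additional image stores (which nest
+	// their layers under a "vfs" subdirectory) for an existing layer
+	// directory; fall back to the primary home for new layers.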
+ for i, home := range d.homes {
+ if i > 0 {
+ home = filepath.Join(home, d.String())
+ }
+ candidate := filepath.Join(home, "dir", filepath.Base(id))
+ fi, err := os.Stat(candidate)
+ if err == nil && fi.IsDir() {
+ return candidate
+ }
+ }
+ return filepath.Join(d.homes[0], "dir", filepath.Base(id))
+}
+
+// Remove deletes the content from the directory for a given id.
+func (d *Driver) Remove(id string) error {
+ return system.EnsureRemoveAll(d.dir(id))
+}
+
+// Get returns the directory for the given id.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+ dir := d.dir(id)
+ if st, err := os.Stat(dir); err != nil {
+ return "", err
+ } else if !st.IsDir() {
+ return "", fmt.Errorf("%s: not a directory", dir)
+ }
+ return dir, nil
+}
+
+// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up.
+func (d *Driver) Put(id string) error {
+ // The vfs driver has no runtime resources (e.g. mounts)
+ // to clean up, so we don't need anything here
+ return nil
+}
+
+// Exists checks to see if the directory exists for the given id.
+func (d *Driver) Exists(id string) bool {
+ _, err := os.Stat(d.dir(id))
+ return err == nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ if len(d.homes) > 1 {
+ return d.homes[1:]
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
new file mode 100644
index 000000000..abe2ac432
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -0,0 +1,965 @@
+// +build windows
+
+package windows
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/Microsoft/go-winio/archive/tar"
+ "github.com/Microsoft/go-winio/backuptar"
+ "github.com/Microsoft/hcsshim"
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/longpath"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/containers/storage/pkg/system"
+ units "github.com/docker/go-units"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+// filterDriver is an HCSShim driver type for the Windows Filter driver.
+const filterDriver = 1
+
+var (
+ // mutatedFiles is a list of files that are mutated by the import process
+ // and must be backed up and restored.
+ mutatedFiles = map[string]string{
+ "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
+ "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak",
+ "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
+ "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
+ }
+ noreexec = false
+)
+
+// init registers the windows graph driver with the graphdriver registry.
+func init() {
+ graphdriver.Register("windowsfilter", InitFilter)
+ // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
+ // debugging issues in the re-exec codepath significantly easier.
+ if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
+ logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
+ noreexec = true
+ } else {
+ reexec.Register("docker-windows-write-layer", writeLayerReexec)
+ }
+}
+
+type checker struct {
+}
+
+func (c *checker) IsMounted(path string) bool {
+ return false
+}
+
+// Driver represents a windows graph driver.
+type Driver struct {
+ // info stores the shim driver information
+ info hcsshim.DriverInfo
+ ctr *graphdriver.RefCounter
+ // it is safe for windows to use a cache here because it does not support
+ // restoring containers when the daemon dies.
+ cacheMu sync.Mutex
+ cache map[string]string
+}
+
+// InitFilter returns a new Windows storage filter driver.
+func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+ logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)
+
+ fsType, err := getFileSystemType(string(home[0]))
+ if err != nil {
+ return nil, err
+ }
+ if strings.ToLower(fsType) == "refs" {
+		return nil, fmt.Errorf("%s is on a ReFS volume - ReFS volumes are not supported", home)
+ }
+
+ if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
+ return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
+ }
+
+ d := &Driver{
+ info: hcsshim.DriverInfo{
+ HomeDir: home,
+ Flavour: filterDriver,
+ },
+ cache: make(map[string]string),
+ ctr: graphdriver.NewRefCounter(&checker{}),
+ }
+ return d, nil
+}
+
+// win32FromHresult is a helper function to get the win32 error code from an HRESULT
+func win32FromHresult(hr uintptr) uintptr {
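+	// An HRESULT with facility FACILITY_WIN32 (0x7) carries the win32 error code in its low 16 bits.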
+ if hr&0x1fff0000 == 0x00070000 {
+ return hr & 0xffff
+ }
+ return hr
+}
+
+// getFileSystemType obtains the type of a file system through GetVolumeInformation
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx
+func getFileSystemType(drive string) (fsType string, hr error) {
+ var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW")
+ buf = make([]uint16, 255)
+ size = windows.MAX_PATH + 1
+ )
+ if len(drive) != 1 {
+ hr = errors.New("getFileSystemType must be called with a drive letter")
+ return
+ }
+ drive += `:\`
+ n := uintptr(unsafe.Pointer(nil))
+ r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
+ if int32(r0) < 0 {
+ hr = syscall.Errno(win32FromHresult(r0))
+ }
+ fsType = windows.UTF16ToString(buf)
+ return
+}
+
+// String returns the string representation of a driver. This should match
+// the name the graph driver has been registered with.
+func (d *Driver) String() string {
+ return "windowsfilter"
+}
+
+// Status returns the status of the driver.
+func (d *Driver) Status() [][2]string {
+ return [][2]string{
+ {"Windows", ""},
+ }
+}
+
+// panicIfUsedByLcow does exactly what it says.
+// TODO @jhowardmsft - this is a temporary measure for the bring-up of
+// Linux containers on Windows. It is a failsafe to ensure that the right
+// graphdriver is used.
+func panicIfUsedByLcow() {
+ if system.LCOWSupported() {
+ panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode")
+ }
+}
+
+// Exists returns true if the given id is registered with this driver.
+func (d *Driver) Exists(id string) bool {
+ panicIfUsedByLcow()
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return false
+ }
+ result, err := hcsshim.LayerExists(d.info, rID)
+ if err != nil {
+ return false
+ }
+ return result
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ panicIfUsedByLcow()
+ if opts != nil {
+ return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt)
+ }
+ return d.create(id, parent, "", false, nil)
+}
+
+// Create creates a new read-only layer with the given id.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+ panicIfUsedByLcow()
+ if opts != nil {
+ return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt)
+ }
+ return d.create(id, parent, "", true, nil)
+}
+
+func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error {
+ rPId, err := d.resolveID(parent)
+ if err != nil {
+ return err
+ }
+
+ parentChain, err := d.getLayerChain(rPId)
+ if err != nil {
+ return err
+ }
+
+ var layerChain []string
+
+ if rPId != "" {
+ parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
+ if err != nil {
+ return err
+ }
+ if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil {
+ // This is a legitimate parent layer (not the empty "-init" layer),
+ // so include it in the layer chain.
+ layerChain = []string{parentPath}
+ }
+ }
+
+ layerChain = append(layerChain, parentChain...)
+
+ if readOnly {
+ if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil {
+ return err
+ }
+ } else {
+ var parentPath string
+ if len(layerChain) != 0 {
+ parentPath = layerChain[0]
+ }
+
+ if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil {
+ return err
+ }
+
+ storageOptions, err := parseStorageOpt(storageOpt)
+ if err != nil {
+ return fmt.Errorf("Failed to parse storage options - %s", err)
+ }
+
+ if storageOptions.size != 0 {
+ if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil {
+ return err
+ }
+ }
+ }
+
+ if _, err := os.Lstat(d.dir(parent)); err != nil {
+ if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
+ logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
+ }
+ return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err)
+ }
+
+ if err := d.setLayerChain(id, layerChain); err != nil {
+ if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
+ logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
+ }
+ return err
+ }
+
+ return nil
+}
+
+// dir returns the absolute path to the layer.
+func (d *Driver) dir(id string) string {
+ return filepath.Join(d.info.HomeDir, filepath.Base(id))
+}
+
+// Remove unmounts and removes the dir information.
+func (d *Driver) Remove(id string) error {
+ panicIfUsedByLcow()
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return err
+ }
+
+	// This retry loop is due to a bug in Windows (Internal bug #9432268):
+	// if GetContainers fails with ErrVmcomputeOperationInvalidState,
+	// it is a transient error. Retry until it succeeds.
+ var computeSystems []hcsshim.ContainerProperties
+ retryCount := 0
+ osv := system.GetOSVersion()
+ for {
+ // Get and terminate any template VMs that are currently using the layer.
+ // Note: It is unfortunate that we end up in the graphdrivers Remove() call
+ // for both containers and images, but the logic for template VMs is only
+ // needed for images - specifically we are looking to see if a base layer
+ // is in use by a template VM as a result of having started a Hyper-V
+ // container at some point.
+ //
+ // We have a retry loop for ErrVmcomputeOperationInvalidState and
+ // ErrVmcomputeOperationAccessIsDenied as there is a race condition
+ // in RS1 and RS2 building during enumeration when a silo is going away
+ // for example under it, in HCS. AccessIsDenied added to fix 30278.
+ //
+ // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider
+ // using platform APIs (if available) to get this more succinctly. Also
+ // consider enhancing the Remove() interface to have context of why
+ // the remove is being called - that could improve efficiency by not
+ // enumerating compute systems during a remove of a container as it's
+ // not required.
+ computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
+ if err != nil {
+ if (osv.Build < 15139) &&
+ ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) {
+ if retryCount >= 500 {
+ break
+ }
+ retryCount++
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+ return err
+ }
+ break
+ }
+
+ for _, computeSystem := range computeSystems {
+ if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate {
+ container, err := hcsshim.OpenContainer(computeSystem.ID)
+ if err != nil {
+ return err
+ }
+ defer container.Close()
+ err = container.Terminate()
+ if hcsshim.IsPending(err) {
+ err = container.Wait()
+ } else if hcsshim.IsAlreadyStopped(err) {
+ err = nil
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ layerPath := filepath.Join(d.info.HomeDir, rID)
+ tmpID := fmt.Sprintf("%s-removing", rID)
+ tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID)
+ if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
+ logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
+ }
+
+ return nil
+}
+
+// Get returns the rootfs path for the id. This will mount the dir at its given path.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+ panicIfUsedByLcow()
+ logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
+ var dir string
+
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return "", err
+ }
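+	// If the layer is already mounted, bump the reference count and return the cached mount path.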
+ if count := d.ctr.Increment(rID); count > 1 {
+ return d.cache[rID], nil
+ }
+
+ // Getting the layer paths must be done outside of the lock.
+ layerChain, err := d.getLayerChain(rID)
+ if err != nil {
+ d.ctr.Decrement(rID)
+ return "", err
+ }
+
+ if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+ d.ctr.Decrement(rID)
+ return "", err
+ }
+ if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+ d.ctr.Decrement(rID)
+ if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
+ }
+ return "", err
+ }
+
+ mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
+ if err != nil {
+ d.ctr.Decrement(rID)
+ if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+ logrus.Warnf("Failed to Unprepare %s: %s", id, err)
+ }
+ if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
+ }
+ return "", err
+ }
+ d.cacheMu.Lock()
+ d.cache[rID] = mountPath
+ d.cacheMu.Unlock()
+
+ // If the layer has a mount path, use that. Otherwise, use the
+ // folder path.
+ if mountPath != "" {
+ dir = mountPath
+ } else {
+ dir = d.dir(id)
+ }
+
+ return dir, nil
+}
+
+// Put releases the layer: when its reference count drops to zero, it is unprepared and deactivated.
+func (d *Driver) Put(id string) error {
+ panicIfUsedByLcow()
+ logrus.Debugf("WindowsGraphDriver Put() id %s", id)
+
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return err
+ }
+ if count := d.ctr.Decrement(rID); count > 0 {
+ return nil
+ }
+ d.cacheMu.Lock()
+ _, exists := d.cache[rID]
+ delete(d.cache, rID)
+ d.cacheMu.Unlock()
+
+ // If the cache was not populated, then the layer was left unprepared and deactivated
+ if !exists {
+ return nil
+ }
+
+ if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+ return err
+ }
+ return hcsshim.DeactivateLayer(d.info, rID)
+}
+
+// Cleanup ensures the information the driver stores is properly removed.
+// We use this opportunity to clean up any -removing folders which may
+// still be left if the daemon was killed while it was removing a layer.
+func (d *Driver) Cleanup() error {
+ items, err := ioutil.ReadDir(d.info.HomeDir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+	// Note we don't return an error below - it's possible the files
+	// are locked. However, next time around after the daemon exits,
+	// we will likely be able to clean up successfully. Instead we log
+	// warnings if there are errors.
+ for _, item := range items {
+ if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
+ if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
+ logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
+ } else {
+ logrus.Infof("Cleaned up %s", item.Name())
+ }
+ }
+ }
+
+ return nil
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+// The layer should be mounted when calling this function
+func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
+ panicIfUsedByLcow()
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return
+ }
+
+ layerChain, err := d.getLayerChain(rID)
+ if err != nil {
+ return
+ }
+
+	// Unprepare the layer so it can be exported; the prepare closure below restores it afterwards.
+ if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+ return nil, err
+ }
+ prepare := func() {
+ if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+			logrus.Warnf("Failed to re-Prepare %s: %s", rID, err)
+ }
+ }
+
+ arch, err := d.exportLayer(rID, layerChain)
+ if err != nil {
+ prepare()
+ return
+ }
+ return ioutils.NewReadCloserWrapper(arch, func() error {
+ err := arch.Close()
+ prepare()
+ return err
+ }), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should not be mounted when calling this function.
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+ panicIfUsedByLcow()
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return nil, err
+ }
+ parentChain, err := d.getLayerChain(rID)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+ logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
+ }
+ }()
+
+ var changes []archive.Change
+ err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+ r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ for {
+ name, _, fileInfo, err := r.Next()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ name = filepath.ToSlash(name)
+ if fileInfo == nil {
+ changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete})
+ } else {
+ // Currently there is no way to tell between an add and a modify.
+ changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify})
+ }
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+// The layer should not be mounted when calling this function
+func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
+ panicIfUsedByLcow()
+ var layerChain []string
+ if parent != "" {
+ rPId, err := d.resolveID(parent)
+ if err != nil {
+ return 0, err
+ }
+ parentChain, err := d.getLayerChain(rPId)
+ if err != nil {
+ return 0, err
+ }
+ parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
+ if err != nil {
+ return 0, err
+ }
+ layerChain = append(layerChain, parentPath)
+ layerChain = append(layerChain, parentChain...)
+ }
+
+ size, err := d.importLayer(id, diff, layerChain)
+ if err != nil {
+ return 0, err
+ }
+
+ if err = d.setLayerChain(id, layerChain); err != nil {
+ return 0, err
+ }
+
+ return size, nil
+}
+
+// DiffSize calculates the changes between the specified layer
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+ panicIfUsedByLcow()
+ rPId, err := d.resolveID(parent)
+ if err != nil {
+ return
+ }
+
+ changes, err := d.Changes(id, rPId)
+ if err != nil {
+ return
+ }
+
+ layerFs, err := d.Get(id, "")
+ if err != nil {
+ return
+ }
+ defer d.Put(id)
+
+ return archive.ChangesSize(layerFs, changes), nil
+}
+
+// Metadata returns custom driver information.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+ panicIfUsedByLcow()
+ m := make(map[string]string)
+ m["dir"] = d.dir(id)
+ return m, nil
+}
+
+func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
+ t := tar.NewWriter(w)
+ for {
+ name, size, fileInfo, err := r.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if fileInfo == nil {
+ // Write a whiteout file.
+ hdr := &tar.Header{
+ Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))),
+ }
+ err := t.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+ } else {
+ err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return t.Close()
+}
+
+// exportLayer generates an archive from a layer based on the given ID.
+func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
+ archive, w := io.Pipe()
+ go func() {
+ err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+ r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths)
+ if err != nil {
+ return err
+ }
+
+ err = writeTarFromLayer(r, w)
+ cerr := r.Close()
+ if err == nil {
+ err = cerr
+ }
+ return err
+ })
+ w.CloseWithError(err)
+ }()
+
+ return archive, nil
+}
+
+// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
+// writes it to a backup stream, and also saves any files that will be mutated
+// by the import layer process to a backup location.
+func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
+ var bcdBackup *os.File
+ var bcdBackupWriter *winio.BackupFileWriter
+ if backupPath, ok := mutatedFiles[hdr.Name]; ok {
+ bcdBackup, err = os.Create(filepath.Join(root, backupPath))
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ cerr := bcdBackup.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
+ defer func() {
+ cerr := bcdBackupWriter.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
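+		// Tee the file contents to both the layer writer and the backup copy.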
+ buf.Reset(io.MultiWriter(w, bcdBackupWriter))
+ } else {
+ buf.Reset(w)
+ }
+
+ defer func() {
+ ferr := buf.Flush()
+ if err == nil {
+ err = ferr
+ }
+ }()
+
+ return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
+}
+
+func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
+ t := tar.NewReader(r)
+ hdr, err := t.Next()
+ totalSize := int64(0)
+ buf := bufio.NewWriter(nil)
+ for err == nil {
+ base := path.Base(hdr.Name)
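+		// A whiteout entry marks a deletion in the layer; translate it into a Remove call on the layer writer.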
+ if strings.HasPrefix(base, archive.WhiteoutPrefix) {
+ name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
+ err = w.Remove(filepath.FromSlash(name))
+ if err != nil {
+ return 0, err
+ }
+ hdr, err = t.Next()
+ } else if hdr.Typeflag == tar.TypeLink {
+ err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
+ if err != nil {
+ return 0, err
+ }
+ hdr, err = t.Next()
+ } else {
+ var (
+ name string
+ size int64
+ fileInfo *winio.FileBasicInfo
+ )
+ name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
+ if err != nil {
+ return 0, err
+ }
+ err = w.Add(filepath.FromSlash(name), fileInfo)
+ if err != nil {
+ return 0, err
+ }
+ hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
+ totalSize += size
+ }
+ }
+ if err != io.EOF {
+ return 0, err
+ }
+ return totalSize, nil
+}
+
+// importLayer adds a new layer to the graph store based on the given data.
+func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) {
+ if !noreexec {
+ cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
+ output := bytes.NewBuffer(nil)
+ cmd.Stdin = layerData
+ cmd.Stdout = output
+ cmd.Stderr = output
+
+ if err = cmd.Start(); err != nil {
+ return
+ }
+
+ if err = cmd.Wait(); err != nil {
+ return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
+ }
+
+ return strconv.ParseInt(output.String(), 10, 64)
+ }
+ return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
+}
+
+// writeLayerReexec is the re-exec entry point for writing a layer from a tar file
+func writeLayerReexec() {
+ size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+ fmt.Fprint(os.Stdout, size)
+}
+
+// writeLayer writes a layer from a tar file.
+func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
+ err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
+ if err != nil {
+ return 0, err
+ }
+ if noreexec {
+ defer func() {
+ if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
+ // This should never happen, but just in case when in debugging mode.
+ // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+				panic("Failed to disable process privileges while in non re-exec mode")
+ }
+ }()
+ }
+
+ info := hcsshim.DriverInfo{
+ Flavour: filterDriver,
+ HomeDir: home,
+ }
+
+ w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+ if err != nil {
+ return 0, err
+ }
+
+ size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+ if err != nil {
+ return 0, err
+ }
+
+ err = w.Close()
+ if err != nil {
+ return 0, err
+ }
+
+ return size, nil
+}
+
+// resolveID computes the layerID information based on the given id.
+func (d *Driver) resolveID(id string) (string, error) {
+ content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+ if os.IsNotExist(err) {
+ return id, nil
+ } else if err != nil {
+ return "", err
+ }
+ return string(content), nil
+}
+
+// setID stores the layerId on disk.
+func (d *Driver) setID(id, altID string) error {
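+	// Note: resolveID reads "layerID" while this writes "layerId"; NTFS is
+	// case-insensitive by default, so both names refer to the same file.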
+ return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+}
+
+// getLayerChain returns the layer chain information.
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+ jPath := filepath.Join(d.dir(id), "layerchain.json")
+ content, err := ioutil.ReadFile(jPath)
+ if os.IsNotExist(err) {
+ return nil, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+ }
+
+ var layerChain []string
+ err = json.Unmarshal(content, &layerChain)
+ if err != nil {
+		return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err)
+ }
+
+ return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
+func (d *Driver) setLayerChain(id string, chain []string) error {
+ content, err := json.Marshal(&chain)
+ if err != nil {
+		return fmt.Errorf("Failed to marshal layerchain json - %s", err)
+ }
+
+ jPath := filepath.Join(d.dir(id), "layerchain.json")
+ err = ioutil.WriteFile(jPath, content, 0600)
+ if err != nil {
+ return fmt.Errorf("Unable to write layerchain file - %s", err)
+ }
+
+ return nil
+}
+
+type fileGetCloserWithBackupPrivileges struct {
+ path string
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+ if backupPath, ok := mutatedFiles[filename]; ok {
+ return os.Open(filepath.Join(fg.path, backupPath))
+ }
+
+ var f *os.File
+ // Open the file while holding the Windows backup privilege. This ensures that the
+ // file can be opened even if the caller does not actually have access to it according
+ // to the security descriptor. Also use sequential file access to avoid depleting the
+ // standby list - Microsoft VSO Bug Tracker #9900466
+ err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+ path := longpath.AddPrefix(filepath.Join(fg.path, filename))
+ p, err := windows.UTF16FromString(path)
+ if err != nil {
+ return err
+ }
+ const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+ h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
+ if err != nil {
+ return &os.PathError{Op: "open", Path: path, Err: err}
+ }
+ f = os.NewFile(uintptr(h), path)
+ return nil
+ })
+ return f, err
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Close() error {
+ return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ panicIfUsedByLcow()
+ id, err := d.resolveID(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ return nil
+}
+
+type storageOptions struct {
+ size uint64
+}
+
+func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
+ options := storageOptions{}
+
+ // Read size to change the block device size per container.
+ for key, val := range storageOpt {
+ key := strings.ToLower(key)
+ switch key {
+ case "size":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ options.size = uint64(size)
+ default:
+ return nil, fmt.Errorf("Unknown storage option: %s", key)
+ }
+ }
+ return &options, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
new file mode 100644
index 000000000..8c8e7d671
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -0,0 +1,426 @@
+// +build linux freebsd solaris
+
+package zfs
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/parsers"
+ zfs "github.com/mistifyio/go-zfs"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+type zfsOptions struct {
+ fsName string
+ mountPath string
+}
+
+func init() {
+ graphdriver.Register("zfs", Init)
+}
+
+// Logger returns a zfs logger implementation.
+type Logger struct{}
+
+// Log wraps log messages from the ZFS driver with a '[zfs]' prefix.
+func (*Logger) Log(cmd []string) {
+ logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
+}
+
+// Init returns a new ZFS driver.
+// It takes a base mount path and an array of options which are represented as key value pairs.
+// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
+func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+ var err error
+
+ if _, err := exec.LookPath("zfs"); err != nil {
+ logrus.Debugf("[zfs] zfs command is not available: %v", err)
+ return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
+ }
+
+	file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+ if err != nil {
+ logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
+ return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
+ }
+ defer file.Close()
+
+ options, err := parseOptions(opt)
+ if err != nil {
+ return nil, err
+ }
+ options.mountPath = base
+
+ rootdir := path.Dir(base)
+
+ if options.fsName == "" {
+ err = checkRootdirFs(rootdir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if options.fsName == "" {
+ options.fsName, err = lookupZfsDataset(rootdir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ zfs.SetLogger(new(Logger))
+
+ filesystems, err := zfs.Filesystems(options.fsName)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+ }
+
+ filesystemsCache := make(map[string]bool, len(filesystems))
+ var rootDataset *zfs.Dataset
+ for _, fs := range filesystems {
+ if fs.Name == options.fsName {
+ rootDataset = fs
+ }
+ filesystemsCache[fs.Name] = true
+ }
+
+ if rootDataset == nil {
+ return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+		return nil, fmt.Errorf("Failed to get root uid/gid: %v", err)
+ }
+ if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
+ return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+ }
+
+ if err := mount.MakePrivate(base); err != nil {
+ return nil, err
+ }
+ d := &Driver{
+ dataset: rootDataset,
+ options: options,
+ filesystemsCache: filesystemsCache,
+ uidMaps: uidMaps,
+ gidMaps: gidMaps,
+ ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
+ }
+ return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (zfsOptions, error) {
+ var options zfsOptions
+ options.fsName = ""
+ for _, option := range opt {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return options, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "zfs.fsname":
+ options.fsName = val
+ default:
+ return options, fmt.Errorf("Unknown option %s", key)
+ }
+ }
+ return options, nil
+}
+
+func lookupZfsDataset(rootdir string) (string, error) {
+ var stat unix.Stat_t
+ if err := unix.Stat(rootdir, &stat); err != nil {
+ return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ }
+ wantedDev := stat.Dev
+
+ mounts, err := mount.GetMounts()
+ if err != nil {
+ return "", err
+ }
+ for _, m := range mounts {
+ if err := unix.Stat(m.Mountpoint, &stat); err != nil {
+ logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
+ continue // may fail on fuse file systems
+ }
+
+ if stat.Dev == wantedDev && m.Fstype == "zfs" {
+ return m.Source, nil
+ }
+ }
+
+ return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+}
+
+// Driver holds information about the driver, such as zfs dataset, options and cache.
+type Driver struct {
+ dataset *zfs.Dataset
+ options zfsOptions
+ sync.Mutex // protects filesystem cache against concurrent access
+ filesystemsCache map[string]bool
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+}
+
+func (d *Driver) String() string {
+ return "zfs"
+}
+
+// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
+func (d *Driver) Cleanup() error {
+ return nil
+}
+
+// Status returns information about the ZFS filesystem. It returns a two-dimensional array of information
+// such as pool name, dataset name, disk usage, parent quota and compression used.
+// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
+// 'Space Available', 'Parent Quota' and 'Compression'.
+func (d *Driver) Status() [][2]string {
+ parts := strings.Split(d.dataset.Name, "/")
+ pool, err := zfs.GetZpool(parts[0])
+
+ var poolName, poolHealth string
+ if err == nil {
+ poolName = pool.Name
+ poolHealth = pool.Health
+ } else {
+ poolName = fmt.Sprintf("error while getting pool information %v", err)
+ poolHealth = "not available"
+ }
+
+ quota := "no"
+ if d.dataset.Quota != 0 {
+ quota = strconv.FormatUint(d.dataset.Quota, 10)
+ }
+
+ return [][2]string{
+ {"Zpool", poolName},
+ {"Zpool Health", poolHealth},
+ {"Parent Dataset", d.dataset.Name},
+ {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+ {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+ {"Parent Quota", quota},
+ {"Compression", d.dataset.Compression},
+ }
+}
+
+// Metadata returns image/container metadata related to graph driver
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+ return map[string]string{
+ "Mountpoint": d.mountPath(id),
+ "Dataset": d.zfsPath(id),
+ }, nil
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+ snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+ parentDataset := zfs.Dataset{Name: parentName}
+	snapshot, err := parentDataset.Snapshot(snapshotName, false /* recursive */)
+ if err != nil {
+ return err
+ }
+
+ _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+ if err == nil {
+ d.Lock()
+ d.filesystemsCache[name] = true
+ d.Unlock()
+ }
+
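+	// Destroy the snapshot with deferred deletion: ZFS removes it once the clone no longer references it.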
+ if err != nil {
+ snapshot.Destroy(zfs.DestroyDeferDeletion)
+ return err
+ }
+ return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) zfsPath(id string) string {
+ return d.options.fsName + "/" + id
+}
+
+func (d *Driver) mountPath(id string) string {
+ return path.Join(d.options.mountPath, "graph", getMountpoint(id))
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ return d.Create(id, parent, opts)
+}
+
+// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+ var storageOpt map[string]string
+ if opts != nil {
+ storageOpt = opts.StorageOpt
+ }
+
+ err := d.create(id, parent, storageOpt)
+ if err == nil {
+ return nil
+ }
+ if zfsError, ok := err.(*zfs.Error); ok {
+ if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") {
+ return err
+ }
+ // aborted build -> cleanup
+ } else {
+ return err
+ }
+
+ dataset := zfs.Dataset{Name: d.zfsPath(id)}
+ if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil {
+ return err
+ }
+
+ // retry
+ return d.create(id, parent, storageOpt)
+}
+
+func (d *Driver) create(id, parent string, storageOpt map[string]string) error {
+ name := d.zfsPath(id)
+ quota, err := parseStorageOpt(storageOpt)
+ if err != nil {
+ return err
+ }
+ if parent == "" {
+ mountoptions := map[string]string{"mountpoint": "legacy"}
+ fs, err := zfs.CreateFilesystem(name, mountoptions)
+ if err == nil {
+ err = setQuota(name, quota)
+ if err == nil {
+ d.Lock()
+ d.filesystemsCache[fs.Name] = true
+ d.Unlock()
+ }
+ }
+ return err
+ }
+ err = d.cloneFilesystem(name, d.zfsPath(parent))
+ if err == nil {
+ err = setQuota(name, quota)
+ }
+ return err
+}
+
+func parseStorageOpt(storageOpt map[string]string) (string, error) {
+ // Read size to change the disk quota per container
+ for k, v := range storageOpt {
+ key := strings.ToLower(k)
+ switch key {
+ case "size":
+ return v, nil
+ default:
+ return "0", fmt.Errorf("Unknown option %s", key)
+ }
+ }
+ return "0", nil
+}
+
+func setQuota(name string, quota string) error {
+ if quota == "0" {
+ return nil
+ }
+ fs, err := zfs.GetDataset(name)
+ if err != nil {
+ return err
+ }
+ return fs.SetProperty("quota", quota)
+}
+
+// Remove deletes the dataset, filesystem and the cache for the given id.
+func (d *Driver) Remove(id string) error {
+ name := d.zfsPath(id)
+ dataset := zfs.Dataset{Name: name}
+ err := dataset.Destroy(zfs.DestroyRecursive)
+ if err == nil {
+ d.Lock()
+ delete(d.filesystemsCache, name)
+ d.Unlock()
+ }
+ return err
+}
+
+// Get returns the mountpoint for the given id after creating the target directories if necessary.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+ mountpoint := d.mountPath(id)
+ if count := d.ctr.Increment(mountpoint); count > 1 {
+ return mountpoint, nil
+ }
+
+ filesystem := d.zfsPath(id)
+ options := label.FormatMountLabel("", mountLabel)
+ logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options)
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+ if err != nil {
+ d.ctr.Decrement(mountpoint)
+ return "", err
+ }
+ // Create the target directories if they don't exist
+ if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
+ d.ctr.Decrement(mountpoint)
+ return "", err
+ }
+
+ if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
+ d.ctr.Decrement(mountpoint)
+ return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
+ }
+
+ // this could be our first mount after creation of the filesystem, and the root dir may still have root
+ // permissions instead of the remapped root uid:gid (if user namespaces are enabled):
+ if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
+ mount.Unmount(mountpoint)
+ d.ctr.Decrement(mountpoint)
+ return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
+ }
+
+ return mountpoint, nil
+}
+
+// Put removes the existing mountpoint for the given id if it exists.
+func (d *Driver) Put(id string) error {
+ mountpoint := d.mountPath(id)
+ if count := d.ctr.Decrement(mountpoint); count > 0 {
+ return nil
+ }
+ mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
+ if err != nil || !mounted {
+ return err
+ }
+
+ logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
+
+ if err := mount.Unmount(mountpoint); err != nil {
+ return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
+ }
+ return nil
+}
+
+// Exists checks to see if the cache entry exists for the given id.
+func (d *Driver) Exists(id string) bool {
+ d.Lock()
+ defer d.Unlock()
+ return d.filesystemsCache[d.zfsPath(id)]
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
new file mode 100644
index 000000000..69c0448d3
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -0,0 +1,39 @@
+package zfs
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/storage/drivers"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func checkRootdirFs(rootdir string) error {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(rootdir, &buf); err != nil {
+ return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ }
+
+ // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
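+	// (122, 102 and 115 are the ASCII codes for 'z', 'f' and 's')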
+ if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
+ logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+ return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
+ }
+
+ return nil
+}
+
+func getMountpoint(id string) string {
+ maxlen := 12
+
+ // we need to preserve filesystem suffix
+ suffix := strings.SplitN(id, "-", 2)
+
+ if len(suffix) > 1 {
+ return id[:maxlen] + "-" + suffix[1]
+ }
+
+ return id[:maxlen]
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
new file mode 100644
index 000000000..da298047d
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
@@ -0,0 +1,28 @@
+package zfs
+
+import (
+ "fmt"
+
+ "github.com/containers/storage/drivers"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func checkRootdirFs(rootdir string) error {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(rootdir, &buf); err != nil {
+ return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ }
+
+ if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
+ logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+ return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
+ }
+
+ return nil
+}
+
+func getMountpoint(id string) string {
+ return id
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go
new file mode 100644
index 000000000..2383bf3bf
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go
@@ -0,0 +1,59 @@
+// +build solaris,cgo
+
+package zfs
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+ struct statvfs *buf;
+ int err;
+ buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+ err = statvfs(s, buf);
+ return buf;
+}
+*/
+import "C"
+import (
+ "path/filepath"
+ "strings"
+ "unsafe"
+
+ "github.com/containers/storage/drivers"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+func checkRootdirFs(rootdir string) error {
+
+ cs := C.CString(filepath.Dir(rootdir))
+ defer C.free(unsafe.Pointer(cs))
+ buf := C.getstatfs(cs)
+ defer C.free(unsafe.Pointer(buf))
+
+ // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
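+	// (122, 102 and 115 are the ASCII codes for 'z', 'f' and 's')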
+ if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+ (buf.f_basetype[3] != 0) {
+ logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+ return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
+ }
+
+ return nil
+}
+
+/* rootfs is introduced to comply with the OCI spec
+which states that the root filesystem must be mounted at <CID>/rootfs/ instead of <CID>/
+*/
+func getMountpoint(id string) string {
+ maxlen := 12
+
+ // we need to preserve filesystem suffix
+ suffix := strings.SplitN(id, "-", 2)
+
+ if len(suffix) > 1 {
+ return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
+ }
+
+ return filepath.Join(id[:maxlen], "rootfs", "root")
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
new file mode 100644
index 000000000..ce8daadaf
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris
+
+package zfs
+
+func checkRootdirFs(rootdir string) error {
+ return nil
+}
+
+func getMountpoint(id string) string {
+ return id
+}
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
new file mode 100644
index 000000000..bed6f8cdc
--- /dev/null
+++ b/vendor/github.com/containers/storage/errors.go
@@ -0,0 +1,56 @@
+package storage
+
+import (
+ "errors"
+)
+
+var (
+ // ErrContainerUnknown indicates that there was no container with the specified name or ID.
+ ErrContainerUnknown = errors.New("container not known")
+ // ErrImageUnknown indicates that there was no image with the specified name or ID.
+ ErrImageUnknown = errors.New("image not known")
+ // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer.
+ ErrParentUnknown = errors.New("parent of layer not known")
+ // ErrLayerUnknown indicates that there was no layer with the specified name or ID.
+ ErrLayerUnknown = errors.New("layer not known")
+ // ErrLoadError indicates that there was an initialization error.
+ ErrLoadError = errors.New("error loading storage metadata")
+ // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used.
+ ErrDuplicateID = errors.New("that ID is already in use")
+ // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used.
+ ErrDuplicateName = errors.New("that name is already in use")
+ // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer.
+ ErrParentIsContainer = errors.New("would-be parent layer is a container")
+ // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container.
+ ErrNotAContainer = errors.New("identifier is not a container")
+ // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image.
+ ErrNotAnImage = errors.New("identifier is not an image")
+ // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer.
+ ErrNotALayer = errors.New("identifier is not a layer")
+ // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist.
+ ErrNotAnID = errors.New("identifier is not a layer, image, or container")
+ // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children.
+ ErrLayerHasChildren = errors.New("layer has children")
+ // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer.
+ ErrLayerUsedByImage = errors.New("layer is in use by an image")
+ // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer.
+ ErrLayerUsedByContainer = errors.New("layer is in use by a container")
+ // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image.
+ ErrImageUsedByContainer = errors.New("image is in use by a container")
+ // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information.
+ ErrIncompleteOptions = errors.New("missing necessary StoreOptions")
+ // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer.
+ ErrSizeUnknown = errors.New("size is not known")
+ // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents.
+ ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
+ // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write.
+ ErrLockReadOnly = errors.New("lock is not a read-write lock")
+ // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images.
+ ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
+ // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
+ ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers")
+ // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty.
+ ErrInvalidBigDataName = errors.New("not a valid name for a big data item")
+ // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item.
+ ErrDigestUnknown = errors.New("could not compute digest of item")
+)
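
Failures throughout this vendor drop funnel through the sentinel values above, and several methods in the files that follow wrap them with github.com/pkg/errors.Wrapf to add context. A caller should therefore compare against errors.Cause(err) rather than err itself. A minimal sketch assuming only the exported sentinels; the helper name and messages are illustrative:

    package example

    import (
        "fmt"

        "github.com/containers/storage"
        "github.com/pkg/errors"
    )

    // describeFailure unwraps a storage error to its sentinel with
    // errors.Cause and maps it to a user-facing message.
    func describeFailure(err error) string {
        switch errors.Cause(err) {
        case storage.ErrImageUnknown:
            return "no such image"
        case storage.ErrLayerUnknown:
            return "no such layer"
        case storage.ErrStoreIsReadOnly:
            return "the store is read-only"
        default:
            return fmt.Sprintf("unexpected storage error: %v", err)
        }
    }
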
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
new file mode 100644
index 000000000..ed22e131f
--- /dev/null
+++ b/vendor/github.com/containers/storage/images.go
@@ -0,0 +1,576 @@
+package storage
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/containers/storage/pkg/truncindex"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// An Image is a reference to a layer and an associated metadata string.
+type Image struct {
+ // ID is either one which was specified at create-time, or a random
+ // value which was generated by the library.
+ ID string `json:"id"`
+
+ // Names is an optional set of user-defined convenience values. The
+ // image can be referred to by its ID or any of its names. Names are
+ // unique among images.
+ Names []string `json:"names,omitempty"`
+
+ // TopLayer is the ID of the topmost layer of the image itself, if the
+ // image contains one or more layers. Multiple images can refer to the
+ // same top layer.
+ TopLayer string `json:"layer"`
+
+ // Metadata is data we keep for the convenience of the caller. It is not
+ // expected to be large, since it is kept in memory.
+ Metadata string `json:"metadata,omitempty"`
+
+ // BigDataNames is a list of names of data items that we keep for the
+ // convenience of the caller. They can be large, and are only in
+ // memory when being read from or written to disk.
+ BigDataNames []string `json:"big-data-names,omitempty"`
+
+ // BigDataSizes maps the names in BigDataNames to the sizes of the data
+ // that has been stored, if they're known.
+ BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
+
+ // BigDataDigests maps the names in BigDataNames to the digests of the
+ // data that has been stored, if they're known.
+ BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
+
+ // Created is the datestamp for when this image was created. Older
+ // versions of the library did not track this information, so callers
+ // will likely want to use the IsZero() method to verify that a value
+ // is set before using it.
+ Created time.Time `json:"created,omitempty"`
+
+ Flags map[string]interface{} `json:"flags,omitempty"`
+}
+
+// ROImageStore provides bookkeeping for information about Images.
+type ROImageStore interface {
+ ROFileBasedStore
+ ROMetadataStore
+ ROBigDataStore
+
+ // Exists checks if there is an image with the given ID or name.
+ Exists(id string) bool
+
+ // Get retrieves information about an image given an ID or name.
+ Get(id string) (*Image, error)
+
+ // Lookup attempts to translate a name to an ID. Most methods do this
+ // implicitly.
+ Lookup(name string) (string, error)
+
+ // Images returns a slice enumerating the known images.
+ Images() ([]Image, error)
+}
+
+// ImageStore provides bookkeeping for information about Images.
+type ImageStore interface {
+ ROImageStore
+ RWFileBasedStore
+ RWMetadataStore
+ RWBigDataStore
+ FlaggableStore
+
+ // Create creates an image that has a specified ID (or a random one) and
+ // optional names, using the specified layer as its topmost (hopefully
+ // read-only) layer. That layer can be referenced by multiple images.
+ Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error)
+
+ // SetNames replaces the list of names associated with an image with the
+ // supplied values.
+ SetNames(id string, names []string) error
+
+ // Delete removes the record of the image.
+ Delete(id string) error
+
+ // Wipe removes records of all images.
+ Wipe() error
+}
+
+type imageStore struct {
+ lockfile Locker
+ dir string
+ images []*Image
+ idindex *truncindex.TruncIndex
+ byid map[string]*Image
+ byname map[string]*Image
+}
+
+func (r *imageStore) Images() ([]Image, error) {
+ images := make([]Image, len(r.images))
+ for i := range r.images {
+ images[i] = *(r.images[i])
+ }
+ return images, nil
+}
+
+func (r *imageStore) imagespath() string {
+ return filepath.Join(r.dir, "images.json")
+}
+
+func (r *imageStore) datadir(id string) string {
+ return filepath.Join(r.dir, id)
+}
+
+func (r *imageStore) datapath(id, key string) string {
+ return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
+}
+
+func (r *imageStore) Load() error {
+ shouldSave := false
+ rpath := r.imagespath()
+ data, err := ioutil.ReadFile(rpath)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ images := []*Image{}
+ idlist := []string{}
+ ids := make(map[string]*Image)
+ names := make(map[string]*Image)
+ if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
+ idlist = make([]string, 0, len(images))
+ for n, image := range images {
+ ids[image.ID] = images[n]
+ idlist = append(idlist, image.ID)
+ for _, name := range image.Names {
+ if conflict, ok := names[name]; ok {
+ r.removeName(conflict, name)
+ shouldSave = true
+ }
+ names[name] = images[n]
+ }
+ }
+ }
+ if shouldSave && !r.IsReadWrite() {
+ return ErrDuplicateImageNames
+ }
+ r.images = images
+ r.idindex = truncindex.NewTruncIndex(idlist)
+ r.byid = ids
+ r.byname = names
+ if shouldSave {
+ return r.Save()
+ }
+ return nil
+}
+
+func (r *imageStore) Save() error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
+ }
+ rpath := r.imagespath()
+ if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+ return err
+ }
+ jdata, err := json.Marshal(&r.images)
+ if err != nil {
+ return err
+ }
+ defer r.Touch()
+ return ioutils.AtomicWriteFile(rpath, jdata, 0600)
+}
+
+func newImageStore(dir string) (ImageStore, error) {
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return nil, err
+ }
+ lockfile, err := GetLockfile(filepath.Join(dir, "images.lock"))
+ if err != nil {
+ return nil, err
+ }
+ lockfile.Lock()
+ defer lockfile.Unlock()
+ istore := imageStore{
+ lockfile: lockfile,
+ dir: dir,
+ images: []*Image{},
+ byid: make(map[string]*Image),
+ byname: make(map[string]*Image),
+ }
+ if err := istore.Load(); err != nil {
+ return nil, err
+ }
+ return &istore, nil
+}
+
+func newROImageStore(dir string) (ROImageStore, error) {
+ lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
+ if err != nil {
+ return nil, err
+ }
+ lockfile.Lock()
+ defer lockfile.Unlock()
+ istore := imageStore{
+ lockfile: lockfile,
+ dir: dir,
+ images: []*Image{},
+ byid: make(map[string]*Image),
+ byname: make(map[string]*Image),
+ }
+ if err := istore.Load(); err != nil {
+ return nil, err
+ }
+ return &istore, nil
+}
+
+func (r *imageStore) lookup(id string) (*Image, bool) {
+ if image, ok := r.byid[id]; ok {
+ return image, ok
+ } else if image, ok := r.byname[id]; ok {
+ return image, ok
+ } else if longid, err := r.idindex.Get(id); err == nil {
+ image, ok := r.byid[longid]
+ return image, ok
+ }
+ return nil, false
+}
+
+func (r *imageStore) ClearFlag(id string, flag string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath())
+ }
+ image, ok := r.lookup(id)
+ if !ok {
+ return ErrImageUnknown
+ }
+ delete(image.Flags, flag)
+ return r.Save()
+}
+
+func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath())
+ }
+ image, ok := r.lookup(id)
+ if !ok {
+ return ErrImageUnknown
+ }
+ if image.Flags == nil {
+ image.Flags = make(map[string]interface{})
+ }
+ image.Flags[flag] = value
+ return r.Save()
+}
+
+func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) {
+ if !r.IsReadWrite() {
+ return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
+ }
+ if id == "" {
+ id = stringid.GenerateRandomID()
+ _, idInUse := r.byid[id]
+ for idInUse {
+ id = stringid.GenerateRandomID()
+ _, idInUse = r.byid[id]
+ }
+ }
+ if _, idInUse := r.byid[id]; idInUse {
+ return nil, ErrDuplicateID
+ }
+ names = dedupeNames(names)
+ for _, name := range names {
+ if _, nameInUse := r.byname[name]; nameInUse {
+ return nil, ErrDuplicateName
+ }
+ }
+ if created.IsZero() {
+ created = time.Now().UTC()
+ }
+ if err == nil {
+ image = &Image{
+ ID: id,
+ Names: names,
+ TopLayer: layer,
+ Metadata: metadata,
+ BigDataNames: []string{},
+ BigDataSizes: make(map[string]int64),
+ BigDataDigests: make(map[string]digest.Digest),
+ Created: created,
+ Flags: make(map[string]interface{}),
+ }
+ r.images = append(r.images, image)
+ r.idindex.Add(id)
+ r.byid[id] = image
+ for _, name := range names {
+ r.byname[name] = image
+ }
+ err = r.Save()
+ }
+ return image, err
+}
+
+func (r *imageStore) Metadata(id string) (string, error) {
+ if image, ok := r.lookup(id); ok {
+ return image.Metadata, nil
+ }
+ return "", ErrImageUnknown
+}
+
+func (r *imageStore) SetMetadata(id, metadata string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath())
+ }
+ if image, ok := r.lookup(id); ok {
+ image.Metadata = metadata
+ return r.Save()
+ }
+ return ErrImageUnknown
+}
+
+func (r *imageStore) removeName(image *Image, name string) {
+ image.Names = stringSliceWithoutValue(image.Names, name)
+}
+
+func (r *imageStore) SetNames(id string, names []string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
+ }
+ names = dedupeNames(names)
+ if image, ok := r.lookup(id); ok {
+ for _, name := range image.Names {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherImage, ok := r.byname[name]; ok {
+ r.removeName(otherImage, name)
+ }
+ r.byname[name] = image
+ }
+ image.Names = names
+ return r.Save()
+ }
+ return ErrImageUnknown
+}
+
+func (r *imageStore) Delete(id string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ }
+ image, ok := r.lookup(id)
+ if !ok {
+ return ErrImageUnknown
+ }
+ id = image.ID
+ toDeleteIndex := -1
+ for i, candidate := range r.images {
+ if candidate.ID == id {
+ toDeleteIndex = i
+ }
+ }
+ delete(r.byid, id)
+ r.idindex.Delete(id)
+ for _, name := range image.Names {
+ delete(r.byname, name)
+ }
+ if toDeleteIndex != -1 {
+ // delete the image at toDeleteIndex
+ if toDeleteIndex == len(r.images)-1 {
+ r.images = r.images[:len(r.images)-1]
+ } else {
+ r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
+ }
+ }
+ if err := r.Save(); err != nil {
+ return err
+ }
+ if err := os.RemoveAll(r.datadir(id)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *imageStore) Get(id string) (*Image, error) {
+ if image, ok := r.lookup(id); ok {
+ return image, nil
+ }
+ return nil, ErrImageUnknown
+}
+
+func (r *imageStore) Lookup(name string) (id string, err error) {
+ if image, ok := r.lookup(name); ok {
+ return image.ID, nil
+ }
+ return "", ErrImageUnknown
+}
+
+func (r *imageStore) Exists(id string) bool {
+ _, ok := r.lookup(id)
+ return ok
+}
+
+func (r *imageStore) BigData(id, key string) ([]byte, error) {
+ if key == "" {
+ return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
+ }
+ image, ok := r.lookup(id)
+ if !ok {
+ return nil, ErrImageUnknown
+ }
+ return ioutil.ReadFile(r.datapath(image.ID, key))
+}
+
+func (r *imageStore) BigDataSize(id, key string) (int64, error) {
+ if key == "" {
+ return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
+ }
+ image, ok := r.lookup(id)
+ if !ok {
+ return -1, ErrImageUnknown
+ }
+ if image.BigDataSizes == nil {
+ image.BigDataSizes = make(map[string]int64)
+ }
+ if size, ok := image.BigDataSizes[key]; ok {
+ return size, nil
+ }
+ if data, err := r.BigData(id, key); err == nil && data != nil {
+ if r.SetBigData(id, key, data) == nil {
+ image, ok := r.lookup(id)
+ if !ok {
+ return -1, ErrImageUnknown
+ }
+ if size, ok := image.BigDataSizes[key]; ok {
+ return size, nil
+ }
+ }
+ }
+ return -1, ErrSizeUnknown
+}
+
+func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
+ if key == "" {
+ return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
+ }
+ image, ok := r.lookup(id)
+ if !ok {
+ return "", ErrImageUnknown
+ }
+ if image.BigDataDigests == nil {
+ image.BigDataDigests = make(map[string]digest.Digest)
+ }
+ if d, ok := image.BigDataDigests[key]; ok {
+ return d, nil
+ }
+ if data, err := r.BigData(id, key); err == nil && data != nil {
+ if r.SetBigData(id, key, data) == nil {
+ image, ok := r.lookup(id)
+ if !ok {
+ return "", ErrImageUnknown
+ }
+ if d, ok := image.BigDataDigests[key]; ok {
+ return d, nil
+ }
+ }
+ }
+ return "", ErrDigestUnknown
+}
+
+func (r *imageStore) BigDataNames(id string) ([]string, error) {
+ image, ok := r.lookup(id)
+ if !ok {
+ return nil, ErrImageUnknown
+ }
+ return image.BigDataNames, nil
+}
+
+func (r *imageStore) SetBigData(id, key string, data []byte) error {
+ if key == "" {
+ return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
+ }
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
+ }
+ image, ok := r.lookup(id)
+ if !ok {
+ return ErrImageUnknown
+ }
+ if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
+ return err
+ }
+ err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
+ if err == nil {
+ save := false
+ if image.BigDataSizes == nil {
+ image.BigDataSizes = make(map[string]int64)
+ }
+ oldSize, sizeOk := image.BigDataSizes[key]
+ image.BigDataSizes[key] = int64(len(data))
+ if image.BigDataDigests == nil {
+ image.BigDataDigests = make(map[string]digest.Digest)
+ }
+ oldDigest, digestOk := image.BigDataDigests[key]
+ newDigest := digest.Canonical.FromBytes(data)
+ image.BigDataDigests[key] = newDigest
+ if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
+ save = true
+ }
+ addName := true
+ for _, name := range image.BigDataNames {
+ if name == key {
+ addName = false
+ break
+ }
+ }
+ if addName {
+ image.BigDataNames = append(image.BigDataNames, key)
+ save = true
+ }
+ if save {
+ err = r.Save()
+ }
+ }
+ return err
+}
+
+func (r *imageStore) Wipe() error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ }
+ ids := make([]string, 0, len(r.byid))
+ for id := range r.byid {
+ ids = append(ids, id)
+ }
+ for _, id := range ids {
+ if err := r.Delete(id); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *imageStore) Lock() {
+ r.lockfile.Lock()
+}
+
+func (r *imageStore) Unlock() {
+ r.lockfile.Unlock()
+}
+
+func (r *imageStore) Touch() error {
+ return r.lockfile.Touch()
+}
+
+func (r *imageStore) Modified() (bool, error) {
+ return r.lockfile.Modified()
+}
+
+func (r *imageStore) IsReadWrite() bool {
+ return r.lockfile.IsReadWrite()
+}
+
+func (r *imageStore) TouchedSince(when time.Time) bool {
+ return r.lockfile.TouchedSince(when)
+}
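
Taken together, the ImageStore interface and the lockfile-backed implementation above imply a calling convention: take the lock, create the image against an existing top layer, and attach large blobs as big-data items so their sizes and digests are recorded. A sketch under those assumptions; the helper, the image name, and the premise that the caller already holds an ImageStore are hypothetical (newImageStore is unexported, so real callers reach this through the higher-level Store):

    package example

    import (
        "time"

        "github.com/containers/storage"
    )

    // recordImage registers an image for an existing layer and stores its
    // configuration blob; SetBigData records the blob's size and digest as
    // a side effect (see BigDataSizes/BigDataDigests above).
    func recordImage(istore storage.ImageStore, layerID string, config []byte) (*storage.Image, error) {
        istore.Lock() // assumes ROFileBasedStore exposes the Locker methods implemented above
        defer istore.Unlock()

        img, err := istore.Create("", []string{"example/app:latest"}, layerID, "", time.Now().UTC())
        if err != nil {
            return nil, err
        }
        if err := istore.SetBigData(img.ID, "config", config); err != nil {
            _ = istore.Delete(img.ID) // roll back the half-created record
            return nil, err
        }
        return img, nil
    }
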
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
new file mode 100644
index 000000000..fd3ef11a6
--- /dev/null
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -0,0 +1,1148 @@
+// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
+// source: images.go
+
+package storage
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/opencontainers/go-digest"
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+)
+
+// MarshalJSON marshal bytes to json - template
+func (j *Image) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{ "id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteByte(',')
+ if len(j.Names) != 0 {
+ buf.WriteString(`"names":`)
+ if j.Names != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Names {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ buf.WriteString(`"layer":`)
+ fflib.WriteJsonString(buf, string(j.TopLayer))
+ buf.WriteByte(',')
+ if len(j.Metadata) != 0 {
+ buf.WriteString(`"metadata":`)
+ fflib.WriteJsonString(buf, string(j.Metadata))
+ buf.WriteByte(',')
+ }
+ if len(j.BigDataNames) != 0 {
+ buf.WriteString(`"big-data-names":`)
+ if j.BigDataNames != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.BigDataNames {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.BigDataSizes) != 0 {
+ if j.BigDataSizes == nil {
+ buf.WriteString(`"big-data-sizes":null`)
+ } else {
+ buf.WriteString(`"big-data-sizes":{ `)
+ for key, value := range j.BigDataSizes {
+ fflib.WriteJsonString(buf, key)
+ buf.WriteString(`:`)
+ fflib.FormatBits2(buf, uint64(value), 10, value < 0)
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.BigDataDigests) != 0 {
+ if j.BigDataDigests == nil {
+ buf.WriteString(`"big-data-digests":null`)
+ } else {
+ buf.WriteString(`"big-data-digests":{ `)
+ for key, value := range j.BigDataDigests {
+ fflib.WriteJsonString(buf, key)
+ buf.WriteString(`:`)
+ fflib.WriteJsonString(buf, string(value))
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ }
+ buf.WriteByte(',')
+ }
+ if true {
+ buf.WriteString(`"created":`)
+
+ {
+
+ obj, err = j.Created.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ buf.Write(obj)
+
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Flags) != 0 {
+ buf.WriteString(`"flags":`)
+ /* Falling back. type=map[string]interface {} kind=map */
+ err = buf.Encode(j.Flags)
+ if err != nil {
+ return err
+ }
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtImagebase = iota
+ ffjtImagenosuchkey
+
+ ffjtImageID
+
+ ffjtImageNames
+
+ ffjtImageTopLayer
+
+ ffjtImageMetadata
+
+ ffjtImageBigDataNames
+
+ ffjtImageBigDataSizes
+
+ ffjtImageBigDataDigests
+
+ ffjtImageCreated
+
+ ffjtImageFlags
+)
+
+var ffjKeyImageID = []byte("id")
+
+var ffjKeyImageNames = []byte("names")
+
+var ffjKeyImageTopLayer = []byte("layer")
+
+var ffjKeyImageMetadata = []byte("metadata")
+
+var ffjKeyImageBigDataNames = []byte("big-data-names")
+
+var ffjKeyImageBigDataSizes = []byte("big-data-sizes")
+
+var ffjKeyImageBigDataDigests = []byte("big-data-digests")
+
+var ffjKeyImageCreated = []byte("created")
+
+var ffjKeyImageFlags = []byte("flags")
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *Image) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *Image) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtImagebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtImagenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'b':
+
+ if bytes.Equal(ffjKeyImageBigDataNames, kn) {
+ currentKey = ffjtImageBigDataNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyImageBigDataSizes, kn) {
+ currentKey = ffjtImageBigDataSizes
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyImageBigDataDigests, kn) {
+ currentKey = ffjtImageBigDataDigests
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'c':
+
+ if bytes.Equal(ffjKeyImageCreated, kn) {
+ currentKey = ffjtImageCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffjKeyImageFlags, kn) {
+ currentKey = ffjtImageFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyImageID, kn) {
+ currentKey = ffjtImageID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'l':
+
+ if bytes.Equal(ffjKeyImageTopLayer, kn) {
+ currentKey = ffjtImageTopLayer
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffjKeyImageMetadata, kn) {
+ currentKey = ffjtImageMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffjKeyImageNames, kn) {
+ currentKey = ffjtImageNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyImageFlags, kn) {
+ currentKey = ffjtImageFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyImageCreated, kn) {
+ currentKey = ffjtImageCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyImageBigDataDigests, kn) {
+ currentKey = ffjtImageBigDataDigests
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyImageBigDataSizes, kn) {
+ currentKey = ffjtImageBigDataSizes
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyImageBigDataNames, kn) {
+ currentKey = ffjtImageBigDataNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyImageMetadata, kn) {
+ currentKey = ffjtImageMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) {
+ currentKey = ffjtImageTopLayer
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyImageNames, kn) {
+ currentKey = ffjtImageNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) {
+ currentKey = ffjtImageID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtImagenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtImageID:
+ goto handle_ID
+
+ case ffjtImageNames:
+ goto handle_Names
+
+ case ffjtImageTopLayer:
+ goto handle_TopLayer
+
+ case ffjtImageMetadata:
+ goto handle_Metadata
+
+ case ffjtImageBigDataNames:
+ goto handle_BigDataNames
+
+ case ffjtImageBigDataSizes:
+ goto handle_BigDataSizes
+
+ case ffjtImageBigDataDigests:
+ goto handle_BigDataDigests
+
+ case ffjtImageCreated:
+ goto handle_Created
+
+ case ffjtImageFlags:
+ goto handle_Flags
+
+ case ffjtImagenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Names:
+
+ /* handler: j.Names type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Names = nil
+ } else {
+
+ j.Names = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJNames = string(string(outBuf))
+
+ }
+ }
+
+ j.Names = append(j.Names, tmpJNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_TopLayer:
+
+ /* handler: j.TopLayer type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.TopLayer = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Metadata:
+
+ /* handler: j.Metadata type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Metadata = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_BigDataNames:
+
+ /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.BigDataNames = nil
+ } else {
+
+ j.BigDataNames = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJBigDataNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJBigDataNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJBigDataNames = string(string(outBuf))
+
+ }
+ }
+
+ j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_BigDataSizes:
+
+ /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.BigDataSizes = nil
+ } else {
+
+ j.BigDataSizes = make(map[string]int64, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJBigDataSizes int64
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJBigDataSizes = int64(tval)
+
+ }
+ }
+
+ j.BigDataSizes[k] = tmpJBigDataSizes
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_BigDataDigests:
+
+ /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.BigDataDigests = nil
+ } else {
+
+ j.BigDataDigests = make(map[string]digest.Digest, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJBigDataDigests digest.Digest
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJBigDataDigests = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ j.BigDataDigests[k] = tmpJBigDataDigests
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Created:
+
+ /* handler: j.Created type=time.Time kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = j.Created.UnmarshalJSON(tbuf)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Flags:
+
+ /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Flags = nil
+ } else {
+
+ j.Flags = make(map[string]interface{}, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJFlags interface{}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/
+
+ {
+ /* Falling back. type=interface {} kind=interface */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJFlags)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.Flags[k] = tmpJFlags
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *imageStore) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *imageStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtimageStorebase = iota
+ ffjtimageStorenosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *imageStore) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *imageStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtimageStorebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtimageStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtimageStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtimageStorenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
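
The generated file above exists so that Save() and Load() in images.go can encode and decode the image list without reflection: it gives Image hand-written MarshalJSON/UnmarshalJSON methods (plus a vacuous pair for the unexported imageStore). Because those methods satisfy json.Marshaler and json.Unmarshaler, ordinary encoding/json calls dispatch to the fast paths automatically. A small round-trip sketch:

    package example

    import (
        "encoding/json"
        "time"

        "github.com/containers/storage"
    )

    // roundTrip encodes and decodes an Image through the ffjson-generated
    // methods, which encoding/json picks up via the Marshaler interfaces.
    func roundTrip() (*storage.Image, error) {
        in := &storage.Image{
            ID:       "0123456789abcdef",
            Names:    []string{"example/app:latest"},
            TopLayer: "layer0",
            Created:  time.Now().UTC(),
        }
        data, err := json.Marshal(in) // calls (*Image).MarshalJSON
        if err != nil {
            return nil, err
        }
        out := &storage.Image{}
        if err := json.Unmarshal(data, out); err != nil { // calls (*Image).UnmarshalJSON
            return nil, err
        }
        return out, nil
    }
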
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
new file mode 100644
index 000000000..f51406a02
--- /dev/null
+++ b/vendor/github.com/containers/storage/layers.go
@@ -0,0 +1,1040 @@
+package storage
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ drivers "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/containers/storage/pkg/truncindex"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+const (
+ tarSplitSuffix = ".tar-split.gz"
+ incompleteFlag = "incomplete"
+ compressionFlag = "diff-compression"
+)
+
+// A Layer is a record of a copy-on-write layer that's stored by the lower
+// level graph driver.
+type Layer struct {
+ // ID is either one which was specified at create-time, or a random
+ // value which was generated by the library.
+ ID string `json:"id"`
+
+ // Names is an optional set of user-defined convenience values. The
+ // layer can be referred to by its ID or any of its names. Names are
+ // unique among layers.
+ Names []string `json:"names,omitempty"`
+
+ // Parent is the ID of a layer from which this layer inherits data.
+ Parent string `json:"parent,omitempty"`
+
+ // Metadata is data we keep for the convenience of the caller. It is not
+ // expected to be large, since it is kept in memory.
+ Metadata string `json:"metadata,omitempty"`
+
+ // MountLabel is an SELinux label which should be used when attempting to mount
+ // the layer.
+ MountLabel string `json:"mountlabel,omitempty"`
+
+ // MountPoint is the path where the layer is mounted, or where it was most
+ // recently mounted. This can change between subsequent Unmount() and
+ // Mount() calls, so the caller should consult this value after Mount()
+ // succeeds to find the location of the container's root filesystem.
+ MountPoint string `json:"-"`
+
+ // MountCount is used as a reference count for the container's layer being
+ // mounted at the mount point.
+ MountCount int `json:"-"`
+
+ // Created is the datestamp for when this layer was created. Older
+ // versions of the library did not track this information, so callers
+ // will likely want to use the IsZero() method to verify that a value
+ // is set before using it.
+ Created time.Time `json:"created,omitempty"`
+
+ // CompressedDigest is the digest of the blob that was last passed to
+ // ApplyDiff() or Put(), as it was presented to us.
+ CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
+
+ // CompressedSize is the length of the blob that was last passed to
+ // ApplyDiff() or Put(), as it was presented to us. If
+ // CompressedDigest is not set, this should be treated as if it were an
+ // uninitialized value.
+ CompressedSize int64 `json:"compressed-size,omitempty"`
+
+ // UncompressedDigest is the digest of the blob that was last passed to
+ // ApplyDiff() or Put(), after we decompressed it. Often referred to
+ // as a DiffID.
+ UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
+
+ // UncompressedSize is the length of the blob that was last passed to
+ // ApplyDiff() or Put(), after we decompressed it. If
+ // UncompressedDigest is not set, this should be treated as if it were
+ // an uninitialized value.
+ UncompressedSize int64 `json:"diff-size,omitempty"`
+
+ // CompressionType is the type of compression which we detected on the blob
+ // that was last passed to ApplyDiff() or Put().
+ CompressionType archive.Compression `json:"compression,omitempty"`
+
+ // Flags is arbitrary data about the layer.
+ Flags map[string]interface{} `json:"flags,omitempty"`
+}
+
+type layerMountPoint struct {
+ ID string `json:"id"`
+ MountPoint string `json:"path"`
+ MountCount int `json:"count"`
+}
+
+// DiffOptions override the default behavior of Diff() methods.
+type DiffOptions struct {
+ // Compression, if set, overrides the default compressor when generating a diff.
+ Compression *archive.Compression
+}
+
+// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
+// name, and keeping track of parent-child relationships, along with a list of
+// all known layers.
+type ROLayerStore interface {
+ ROFileBasedStore
+ ROMetadataStore
+
+ // Exists checks if a layer with the specified name or ID is known.
+ Exists(id string) bool
+
+ // Get retrieves information about a layer given an ID or name.
+ Get(id string) (*Layer, error)
+
+ // Status returns a slice of key-value pairs, suitable for human consumption,
+ // relaying whatever status information the underlying driver can share.
+ Status() ([][2]string, error)
+
+ // Changes returns a slice of Change structures, which contain a pathname
+ // (Path) and a description of what sort of change (Kind) was made by the
+ // layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
+ // specified layer. By default, the layer's parent is used as a reference.
+ Changes(from, to string) ([]archive.Change, error)
+
+ // Diff produces a tarstream which can be applied to a layer with the contents
+ // of the first layer to produce a layer with the contents of the second layer.
+ // By default, the parent of the second layer is used as the first
+ // layer, so it need not be specified. Options can be used to override
+ // default behavior, but are also not required.
+ Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
+
+ // DiffSize produces an estimate of the length of the tarstream which would be
+ // produced by Diff.
+ DiffSize(from, to string) (int64, error)
+
+ // Size produces a cached value for the uncompressed size of the layer,
+ // if one is known, or -1 if it is not known. If the layer cannot be
+ // found, it returns an error.
+ Size(name string) (int64, error)
+
+ // Lookup attempts to translate a name to an ID. Most methods do this
+ // implicitly.
+ Lookup(name string) (string, error)
+
+ // LayersByCompressedDigest returns a slice of the layers with the
+ // specified compressed digest value recorded for them.
+ LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
+
+ // LayersByUncompressedDigest returns a slice of the layers with the
+ // specified uncompressed digest value recorded for them.
+ LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+
+ // Layers returns a slice of the known layers.
+ Layers() ([]Layer, error)
+}
+
+// LayerStore wraps a graph driver, adding the ability to refer to layers by
+// name, and keeping track of parent-child relationships, along with a list of
+// all known layers.
+type LayerStore interface {
+ ROLayerStore
+ RWFileBasedStore
+ RWMetadataStore
+ FlaggableStore
+
+ // Create creates a new layer, optionally giving it a specified ID rather than
+ // a randomly-generated one, either inheriting data from another specified
+ // layer or the empty base layer. The new layer can optionally be given names
+ // and have an SELinux label specified for use when mounting it. Some
+ // underlying drivers can accept a "size" option. At this time, most
+ // underlying drivers do not themselves distinguish between writeable
+ // and read-only layers.
+ Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (*Layer, error)
+
+ // CreateWithFlags combines the functions of Create and SetFlag.
+ CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
+
+ // Put combines the functions of CreateWithFlags and ApplyDiff.
+ Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
+
+ // SetNames replaces the list of names associated with a layer with the
+ // supplied values.
+ SetNames(id string, names []string) error
+
+ // Delete deletes a layer with the specified name or ID.
+ Delete(id string) error
+
+ // Wipe deletes all layers.
+ Wipe() error
+
+ // Mount mounts a layer for use. If the specified layer is the parent of other
+ // layers, it should not be written to. An SELinux label to be applied to the
+ // mount can be specified to override the one configured for the layer.
+ Mount(id, mountLabel string) (string, error)
+
+ // Unmount unmounts a layer when it is no longer in use.
+ Unmount(id string) error
+
+ // ApplyDiff reads a tarstream which was created by a previous call to Diff and
+ // applies its changes to a specified layer.
+ ApplyDiff(to string, diff io.Reader) (int64, error)
+}
+
+type layerStore struct {
+ lockfile Locker
+ rundir string
+ driver drivers.Driver
+ layerdir string
+ layers []*Layer
+ idindex *truncindex.TruncIndex
+ byid map[string]*Layer
+ byname map[string]*Layer
+ bymount map[string]*Layer
+ bycompressedsum map[digest.Digest][]string
+ byuncompressedsum map[digest.Digest][]string
+}
+
+func (r *layerStore) Layers() ([]Layer, error) {
+ layers := make([]Layer, len(r.layers))
+ for i := range r.layers {
+ layers[i] = *(r.layers[i])
+ }
+ return layers, nil
+}
+
+func (r *layerStore) mountspath() string {
+ return filepath.Join(r.rundir, "mountpoints.json")
+}
+
+func (r *layerStore) layerspath() string {
+ return filepath.Join(r.layerdir, "layers.json")
+}
+
+func (r *layerStore) Load() error {
+ shouldSave := false
+ rpath := r.layerspath()
+ data, err := ioutil.ReadFile(rpath)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ layers := []*Layer{}
+ idlist := []string{}
+ ids := make(map[string]*Layer)
+ names := make(map[string]*Layer)
+ mounts := make(map[string]*Layer)
+ compressedsums := make(map[digest.Digest][]string)
+ uncompressedsums := make(map[digest.Digest][]string)
+ if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
+ idlist = make([]string, 0, len(layers))
+ for n, layer := range layers {
+ ids[layer.ID] = layers[n]
+ idlist = append(idlist, layer.ID)
+ for _, name := range layer.Names {
+ if conflict, ok := names[name]; ok {
+ r.removeName(conflict, name)
+ shouldSave = true
+ }
+ names[name] = layers[n]
+ }
+ if layer.CompressedDigest != "" {
+ compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+ }
+ if layer.UncompressedDigest != "" {
+ uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+ }
+ }
+ }
+ if shouldSave && !r.IsReadWrite() {
+ return ErrDuplicateLayerNames
+ }
+ mpath := r.mountspath()
+ data, err = ioutil.ReadFile(mpath)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ layerMounts := []layerMountPoint{}
+ if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
+ for _, mount := range layerMounts {
+ if mount.MountPoint != "" {
+ if layer, ok := ids[mount.ID]; ok {
+ mounts[mount.MountPoint] = layer
+ layer.MountPoint = mount.MountPoint
+ layer.MountCount = mount.MountCount
+ }
+ }
+ }
+ }
+ r.layers = layers
+ r.idindex = truncindex.NewTruncIndex(idlist)
+ r.byid = ids
+ r.byname = names
+ r.bymount = mounts
+ r.bycompressedsum = compressedsums
+ r.byuncompressedsum = uncompressedsums
+ err = nil
+ // Last step: if we're writable, try to remove anything that a previous
+ // user of this storage area marked for deletion but didn't manage to
+ // actually delete.
+ if r.IsReadWrite() {
+ for _, layer := range r.layers {
+ if layer.Flags == nil {
+ layer.Flags = make(map[string]interface{})
+ }
+ if cleanup, ok := layer.Flags[incompleteFlag]; ok {
+ if b, ok := cleanup.(bool); ok && b {
+ err = r.Delete(layer.ID)
+ if err != nil {
+ break
+ }
+ shouldSave = true
+ }
+ }
+ }
+ if shouldSave {
+ return r.Save()
+ }
+ }
+ return err
+}
+
+func (r *layerStore) Save() error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+ }
+ rpath := r.layerspath()
+ if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+ return err
+ }
+ jldata, err := json.Marshal(&r.layers)
+ if err != nil {
+ return err
+ }
+ mpath := r.mountspath()
+ if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
+ return err
+ }
+ mounts := make([]layerMountPoint, 0, len(r.layers))
+ for _, layer := range r.layers {
+ if layer.MountPoint != "" && layer.MountCount > 0 {
+ mounts = append(mounts, layerMountPoint{
+ ID: layer.ID,
+ MountPoint: layer.MountPoint,
+ MountCount: layer.MountCount,
+ })
+ }
+ }
+ jmdata, err := json.Marshal(&mounts)
+ if err != nil {
+ return err
+ }
+ if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
+ return err
+ }
+ defer r.Touch()
+ return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
+}
+
+func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) {
+ if err := os.MkdirAll(rundir, 0700); err != nil {
+ return nil, err
+ }
+ if err := os.MkdirAll(layerdir, 0700); err != nil {
+ return nil, err
+ }
+ lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock"))
+ if err != nil {
+ return nil, err
+ }
+ lockfile.Lock()
+ defer lockfile.Unlock()
+ rlstore := layerStore{
+ lockfile: lockfile,
+ driver: driver,
+ rundir: rundir,
+ layerdir: layerdir,
+ byid: make(map[string]*Layer),
+ bymount: make(map[string]*Layer),
+ byname: make(map[string]*Layer),
+ }
+ if err := rlstore.Load(); err != nil {
+ return nil, err
+ }
+ return &rlstore, nil
+}
+
+func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
+ lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
+ if err != nil {
+ return nil, err
+ }
+ lockfile.Lock()
+ defer lockfile.Unlock()
+ rlstore := layerStore{
+ lockfile: lockfile,
+ driver: driver,
+ rundir: rundir,
+ layerdir: layerdir,
+ byid: make(map[string]*Layer),
+ bymount: make(map[string]*Layer),
+ byname: make(map[string]*Layer),
+ }
+ if err := rlstore.Load(); err != nil {
+ return nil, err
+ }
+ return &rlstore, nil
+}
+
+func (r *layerStore) lookup(id string) (*Layer, bool) {
+ if layer, ok := r.byid[id]; ok {
+ return layer, ok
+ } else if layer, ok := r.byname[id]; ok {
+ return layer, ok
+ } else if longid, err := r.idindex.Get(id); err == nil {
+ layer, ok := r.byid[longid]
+ return layer, ok
+ }
+ return nil, false
+}
+
+func (r *layerStore) Size(name string) (int64, error) {
+ layer, ok := r.lookup(name)
+ if !ok {
+ return -1, ErrLayerUnknown
+ }
+ // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
+ // a zero value is not just present because it was never set to anything else (which can happen if the layer was
+ // created by a version of this library that didn't keep track of digest and size information).
+ if layer.UncompressedDigest != "" {
+ return layer.UncompressedSize, nil
+ }
+ return -1, nil
+}
+
+func (r *layerStore) ClearFlag(id string, flag string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
+ }
+ layer, ok := r.lookup(id)
+ if !ok {
+ return ErrLayerUnknown
+ }
+ delete(layer.Flags, flag)
+ return r.Save()
+}
+
+func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
+ }
+ layer, ok := r.lookup(id)
+ if !ok {
+ return ErrLayerUnknown
+ }
+ if layer.Flags == nil {
+ layer.Flags = make(map[string]interface{})
+ }
+ layer.Flags[flag] = value
+ return r.Save()
+}
+
+func (r *layerStore) Status() ([][2]string, error) {
+ return r.driver.Status(), nil
+}
+
+func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
+ if !r.IsReadWrite() {
+ return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
+ }
+ size = -1
+ if err := os.MkdirAll(r.rundir, 0700); err != nil {
+ return nil, -1, err
+ }
+ if err := os.MkdirAll(r.layerdir, 0700); err != nil {
+ return nil, -1, err
+ }
+ if parent != "" {
+ if parentLayer, ok := r.lookup(parent); ok {
+ parent = parentLayer.ID
+ }
+ }
+ if id == "" {
+ id = stringid.GenerateRandomID()
+ _, idInUse := r.byid[id]
+ for idInUse {
+ id = stringid.GenerateRandomID()
+ _, idInUse = r.byid[id]
+ }
+ }
+ if _, idInUse := r.byid[id]; idInUse {
+ return nil, -1, ErrDuplicateID
+ }
+ names = dedupeNames(names)
+ for _, name := range names {
+ if _, nameInUse := r.byname[name]; nameInUse {
+ return nil, -1, ErrDuplicateName
+ }
+ }
+ opts := drivers.CreateOpts{
+ MountLabel: mountLabel,
+ StorageOpt: options,
+ }
+ if writeable {
+ err = r.driver.CreateReadWrite(id, parent, &opts)
+ } else {
+ err = r.driver.Create(id, parent, &opts)
+ }
+ if err == nil {
+ layer = &Layer{
+ ID: id,
+ Parent: parent,
+ Names: names,
+ MountLabel: mountLabel,
+ Created: time.Now().UTC(),
+ Flags: make(map[string]interface{}),
+ }
+ r.layers = append(r.layers, layer)
+ r.idindex.Add(id)
+ r.byid[id] = layer
+ for _, name := range names {
+ r.byname[name] = layer
+ }
+ for flag, value := range flags {
+ layer.Flags[flag] = value
+ }
+ if diff != nil {
+ layer.Flags[incompleteFlag] = true
+ err = r.Save()
+ if err != nil {
+ // Saving failed, so we have no on-disk record of this layer;
+ // at least ask the driver to clean up its copy.
+ r.driver.Remove(id)
+ return nil, -1, err
+ }
+ size, err = r.ApplyDiff(layer.ID, diff)
+ if err != nil {
+ if r.Delete(layer.ID) != nil {
+ // Either a driver error or an error saving.
+ // We now have a layer that's been marked for
+ // deletion but which we failed to remove.
+ }
+ return nil, -1, err
+ }
+ delete(layer.Flags, incompleteFlag)
+ }
+ err = r.Save()
+ if err != nil {
+ // Saving failed, so we have no on-disk record of this layer;
+ // at least ask the driver to clean up its copy.
+ r.driver.Remove(id)
+ return nil, -1, err
+ }
+ }
+ return layer, size, err
+}
+
+func (r *layerStore) CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
+ layer, _, err = r.Put(id, parent, names, mountLabel, options, writeable, flags, nil)
+ return layer, err
+}
+
+func (r *layerStore) Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (layer *Layer, err error) {
+ return r.CreateWithFlags(id, parent, names, mountLabel, options, writeable, nil)
+}
+
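+// Mount mounts the layer, reusing the existing mountpoint and bumping the
+// reference count if it is already mounted; Unmount decrements the count and
+// only asks the driver to tear the mount down once the count reaches zero.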
+func (r *layerStore) Mount(id, mountLabel string) (string, error) {
+ if !r.IsReadWrite() {
+ return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+ }
+ layer, ok := r.lookup(id)
+ if !ok {
+ return "", ErrLayerUnknown
+ }
+ if layer.MountCount > 0 {
+ layer.MountCount++
+ return layer.MountPoint, r.Save()
+ }
+ if mountLabel == "" {
+ mountLabel = layer.MountLabel
+ }
+ mountpoint, err := r.driver.Get(id, mountLabel)
+ if mountpoint != "" && err == nil {
+ if layer.MountPoint != "" {
+ delete(r.bymount, layer.MountPoint)
+ }
+ layer.MountPoint = filepath.Clean(mountpoint)
+ layer.MountCount++
+ r.bymount[layer.MountPoint] = layer
+ err = r.Save()
+ }
+ return mountpoint, err
+}
+
+func (r *layerStore) Unmount(id string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+ }
+ layer, ok := r.lookup(id)
+ if !ok {
+ layerByMount, ok := r.bymount[filepath.Clean(id)]
+ if !ok {
+ return ErrLayerUnknown
+ }
+ layer = layerByMount
+ }
+ if layer.MountCount > 1 {
+ layer.MountCount--
+ return r.Save()
+ }
+ err := r.driver.Put(id)
+ if err == nil || os.IsNotExist(err) {
+ if layer.MountPoint != "" {
+ delete(r.bymount, layer.MountPoint)
+ }
+ layer.MountCount--
+ layer.MountPoint = ""
+ err = r.Save()
+ }
+ return err
+}
+
+func (r *layerStore) removeName(layer *Layer, name string) {
+ layer.Names = stringSliceWithoutValue(layer.Names, name)
+}
+
+func (r *layerStore) SetNames(id string, names []string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
+ }
+ names = dedupeNames(names)
+ if layer, ok := r.lookup(id); ok {
+ for _, name := range layer.Names {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherLayer, ok := r.byname[name]; ok {
+ r.removeName(otherLayer, name)
+ }
+ r.byname[name] = layer
+ }
+ layer.Names = names
+ return r.Save()
+ }
+ return ErrLayerUnknown
+}
+
+func (r *layerStore) Metadata(id string) (string, error) {
+ if layer, ok := r.lookup(id); ok {
+ return layer.Metadata, nil
+ }
+ return "", ErrLayerUnknown
+}
+
+func (r *layerStore) SetMetadata(id, metadata string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
+ }
+ if layer, ok := r.lookup(id); ok {
+ layer.Metadata = metadata
+ return r.Save()
+ }
+ return ErrLayerUnknown
+}
+
+func (r *layerStore) tspath(id string) string {
+ return filepath.Join(r.layerdir, id+tarSplitSuffix)
+}
+
+func (r *layerStore) Delete(id string) error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ }
+ layer, ok := r.lookup(id)
+ if !ok {
+ return ErrLayerUnknown
+ }
+ id = layer.ID
+ for layer.MountCount > 0 {
+ if err := r.Unmount(id); err != nil {
+ return err
+ }
+ }
+ err := r.driver.Remove(id)
+ if err == nil {
+ os.Remove(r.tspath(id))
+ delete(r.byid, id)
+ r.idindex.Delete(id)
+ if layer.MountPoint != "" {
+ delete(r.bymount, layer.MountPoint)
+ }
+ toDeleteIndex := -1
+ for i, candidate := range r.layers {
+ if candidate.ID == id {
+ toDeleteIndex = i
+ break
+ }
+ }
+ if toDeleteIndex != -1 {
+ // delete the layer at toDeleteIndex
+ if toDeleteIndex == len(r.layers)-1 {
+ r.layers = r.layers[:len(r.layers)-1]
+ } else {
+ r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
+ }
+ }
+ if err = r.Save(); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func (r *layerStore) Lookup(name string) (id string, err error) {
+ if layer, ok := r.lookup(name); ok {
+ return layer.ID, nil
+ }
+ return "", ErrLayerUnknown
+}
+
+func (r *layerStore) Exists(id string) bool {
+ _, ok := r.lookup(id)
+ return ok
+}
+
+func (r *layerStore) Get(id string) (*Layer, error) {
+ if layer, ok := r.lookup(id); ok {
+ return layer, nil
+ }
+ return nil, ErrLayerUnknown
+}
+
+func (r *layerStore) Wipe() error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ }
+ ids := make([]string, 0, len(r.byid))
+ for id := range r.byid {
+ ids = append(ids, id)
+ }
+ for _, id := range ids {
+ if err := r.Delete(id); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, toLayer *Layer, err error) {
+ var ok bool
+ var fromLayer *Layer
+ toLayer, ok = r.lookup(to)
+ if !ok {
+ return "", "", nil, ErrLayerUnknown
+ }
+ to = toLayer.ID
+ if from == "" {
+ from = toLayer.Parent
+ }
+ if from != "" {
+ fromLayer, ok = r.lookup(from)
+ if ok {
+ from = fromLayer.ID
+ } else {
+ fromLayer, ok = r.lookup(toLayer.Parent)
+ if ok {
+ from = fromLayer.ID
+ }
+ }
+ }
+ return from, to, toLayer, nil
+}
+
+func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
+ from, to, _, err := r.findParentAndLayer(from, to)
+ if err != nil {
+ return nil, ErrLayerUnknown
+ }
+ return r.driver.Changes(to, from)
+}
+
+type simpleGetCloser struct {
+ r *layerStore
+ path string
+ id string
+}
+
+func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
+ return os.Open(filepath.Join(s.path, path))
+}
+
+func (s *simpleGetCloser) Close() error {
+ return s.r.Unmount(s.id)
+}
+
+func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
+ if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
+ return getter.DiffGetter(id)
+ }
+ path, err := r.Mount(id, "")
+ if err != nil {
+ return nil, err
+ }
+ return &simpleGetCloser{
+ r: r,
+ path: path,
+ id: id,
+ }, nil
+}
+
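+// Diff produces the layer's diff as a tar stream. When a tar-split record was
+// saved while the diff was applied, the stream is reassembled from that record
+// plus the mounted layer's files; otherwise the graph driver computes it. The
+// result is recompressed to the requested compression, or, by default, to the
+// compression noted when the diff was originally applied.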
+func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
+ var metadata storage.Unpacker
+
+ from, to, toLayer, err := r.findParentAndLayer(from, to)
+ if err != nil {
+ return nil, ErrLayerUnknown
+ }
+ // Default to applying the type of compression that we noted was used
+ // for the layer diff when it was applied.
+ compression := toLayer.CompressionType
+ // If a particular compression type (or no compression) was selected,
+ // use that instead.
+ if options != nil && options.Compression != nil {
+ compression = *options.Compression
+ }
+ maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) {
+ // Depending on whether or not compression is desired, return either the
+ // passed-in ReadCloser, or a new one that provides its readers with a
+ // compressed version of the data that the original would have provided
+ // to its readers.
+ if compression == archive.Uncompressed {
+ return rc, nil
+ }
+ preader, pwriter := io.Pipe()
+ compressor, err := archive.CompressStream(pwriter, compression)
+ if err != nil {
+ rc.Close()
+ pwriter.Close()
+ preader.Close()
+ return nil, err
+ }
+ go func() {
+ defer pwriter.Close()
+ defer compressor.Close()
+ defer rc.Close()
+ io.Copy(compressor, rc)
+ }()
+ return preader, nil
+ }
+
+ if from != toLayer.Parent {
+ diff, err := r.driver.Diff(to, from)
+ if err != nil {
+ return nil, err
+ }
+ return maybeCompressReadCloser(diff)
+ }
+
+ tsfile, err := os.Open(r.tspath(to))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ diff, err := r.driver.Diff(to, from)
+ if err != nil {
+ return nil, err
+ }
+ return maybeCompressReadCloser(diff)
+ }
+ defer tsfile.Close()
+
+ decompressor, err := gzip.NewReader(tsfile)
+ if err != nil {
+ return nil, err
+ }
+ defer decompressor.Close()
+
+ tsbytes, err := ioutil.ReadAll(decompressor)
+ if err != nil {
+ return nil, err
+ }
+
+ metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes))
+
+ fgetter, err := r.newFileGetter(to)
+ if err != nil {
+ return nil, err
+ }
+
+ tarstream := asm.NewOutputTarStream(fgetter, metadata)
+ rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
+ err1 := tarstream.Close()
+ err2 := fgetter.Close()
+ if err2 == nil {
+ return err1
+ }
+ return err2
+ })
+ return maybeCompressReadCloser(rc)
+}
+
+func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
+ from, to, _, err = r.findParentAndLayer(from, to)
+ if err != nil {
+ return -1, ErrLayerUnknown
+ }
+ return r.driver.DiffSize(to, from)
+}
+
+func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
+ if !r.IsReadWrite() {
+ return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
+ }
+
+ layer, ok := r.lookup(to)
+ if !ok {
+ return -1, ErrLayerUnknown
+ }
+
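+ // Sniff up to 10KB of the stream to detect its compression type, then
+ // stitch the sniffed bytes back onto the remainder with io.MultiReader.
+ // Teeing through the write counters lets us compute the compressed and
+ // uncompressed digests and sizes while the diff is applied.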
+ header := make([]byte, 10240)
+ n, err := diff.Read(header)
+ if err != nil && err != io.EOF {
+ return -1, err
+ }
+
+ compression := archive.DetectCompression(header[:n])
+ compressedDigest := digest.Canonical.Digester()
+ compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
+ defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
+
+ tsdata := bytes.Buffer{}
+ compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed)
+ if err != nil {
+ compressor = gzip.NewWriter(&tsdata)
+ }
+ metadata := storage.NewJSONPacker(compressor)
+ uncompressed, err := archive.DecompressStream(defragmented)
+ if err != nil {
+ return -1, err
+ }
+ uncompressedDigest := digest.Canonical.Digester()
+ uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
+ payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter())
+ if err != nil {
+ return -1, err
+ }
+ size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload)
+ if err != nil {
+ return -1, err
+ }
+ compressor.Close()
+ if err == nil {
+ if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
+ return -1, err
+ }
+ if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
+ return -1, err
+ }
+ }
+
+ updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
+ var newList []string
+ if oldvalue != "" {
+ for _, value := range (*m)[oldvalue] {
+ if value != id {
+ newList = append(newList, value)
+ }
+ }
+ if len(newList) > 0 {
+ (*m)[oldvalue] = newList
+ } else {
+ delete(*m, oldvalue)
+ }
+ }
+ if newvalue != "" {
+ (*m)[newvalue] = append((*m)[newvalue], id)
+ }
+ }
+ updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID)
+ layer.CompressedDigest = compressedDigest.Digest()
+ layer.CompressedSize = compressedCounter.Count
+ updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID)
+ layer.UncompressedDigest = uncompressedDigest.Digest()
+ layer.UncompressedSize = uncompressedCounter.Count
+ layer.CompressionType = compression
+
+ err = r.Save()
+
+ return size, err
+}
+
+func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
+ var layers []Layer
+ for _, layerID := range m[d] {
+ layer, ok := r.lookup(layerID)
+ if !ok {
+ return nil, ErrLayerUnknown
+ }
+ layers = append(layers, *layer)
+ }
+ return layers, nil
+}
+
+func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
+ return r.layersByDigestMap(r.bycompressedsum, d)
+}
+
+func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
+ return r.layersByDigestMap(r.byuncompressedsum, d)
+}
+
+func (r *layerStore) Lock() {
+ r.lockfile.Lock()
+}
+
+func (r *layerStore) Unlock() {
+ r.lockfile.Unlock()
+}
+
+func (r *layerStore) Touch() error {
+ return r.lockfile.Touch()
+}
+
+func (r *layerStore) Modified() (bool, error) {
+ return r.lockfile.Modified()
+}
+
+func (r *layerStore) IsReadWrite() bool {
+ return r.lockfile.IsReadWrite()
+}
+
+func (r *layerStore) TouchedSince(when time.Time) bool {
+ return r.lockfile.TouchedSince(when)
+}
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
new file mode 100644
index 000000000..8bec40e17
--- /dev/null
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -0,0 +1,1713 @@
+// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
+// source: layers.go
+
+package storage
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/opencontainers/go-digest"
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+)
+
+// MarshalJSON marshals bytes to json - template
+func (j *DiffOptions) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals buf to json - template
+func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if j.Compression != nil {
+ buf.WriteString(`{"Compression":`)
+ fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0)
+ } else {
+ buf.WriteString(`{"Compression":null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtDiffOptionsbase = iota
+ ffjtDiffOptionsnosuchkey
+
+ ffjtDiffOptionsCompression
+)
+
+var ffjKeyDiffOptionsCompression = []byte("Compression")
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *DiffOptions) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtDiffOptionsbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'C':
+
+ if bytes.Equal(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtDiffOptionsCompression:
+ goto handle_Compression
+
+ case ffjtDiffOptionsnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Compression:
+
+ /* handler: j.Compression type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ j.Compression = nil
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ ttypval := archive.Compression(tval)
+ j.Compression = &ttypval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshals bytes to json - template
+func (j *Layer) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals buf to json - template
+func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{ "id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteByte(',')
+ if len(j.Names) != 0 {
+ buf.WriteString(`"names":`)
+ if j.Names != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Names {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Parent) != 0 {
+ buf.WriteString(`"parent":`)
+ fflib.WriteJsonString(buf, string(j.Parent))
+ buf.WriteByte(',')
+ }
+ if len(j.Metadata) != 0 {
+ buf.WriteString(`"metadata":`)
+ fflib.WriteJsonString(buf, string(j.Metadata))
+ buf.WriteByte(',')
+ }
+ if len(j.MountLabel) != 0 {
+ buf.WriteString(`"mountlabel":`)
+ fflib.WriteJsonString(buf, string(j.MountLabel))
+ buf.WriteByte(',')
+ }
+ if true {
+ buf.WriteString(`"created":`)
+
+ {
+
+ obj, err = j.Created.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ buf.Write(obj)
+
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.CompressedDigest) != 0 {
+ buf.WriteString(`"compressed-diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.CompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.CompressedSize != 0 {
+ buf.WriteString(`"compressed-size":`)
+ fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.UncompressedDigest) != 0 {
+ buf.WriteString(`"diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.UncompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.UncompressedSize != 0 {
+ buf.WriteString(`"diff-size":`)
+ fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if j.CompressionType != 0 {
+ buf.WriteString(`"compression":`)
+ fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.Flags) != 0 {
+ buf.WriteString(`"flags":`)
+ /* Falling back. type=map[string]interface {} kind=map */
+ err = buf.Encode(j.Flags)
+ if err != nil {
+ return err
+ }
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtLayerbase = iota
+ ffjtLayernosuchkey
+
+ ffjtLayerID
+
+ ffjtLayerNames
+
+ ffjtLayerParent
+
+ ffjtLayerMetadata
+
+ ffjtLayerMountLabel
+
+ ffjtLayerCreated
+
+ ffjtLayerCompressedDigest
+
+ ffjtLayerCompressedSize
+
+ ffjtLayerUncompressedDigest
+
+ ffjtLayerUncompressedSize
+
+ ffjtLayerCompressionType
+
+ ffjtLayerFlags
+)
+
+var ffjKeyLayerID = []byte("id")
+
+var ffjKeyLayerNames = []byte("names")
+
+var ffjKeyLayerParent = []byte("parent")
+
+var ffjKeyLayerMetadata = []byte("metadata")
+
+var ffjKeyLayerMountLabel = []byte("mountlabel")
+
+var ffjKeyLayerCreated = []byte("created")
+
+var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest")
+
+var ffjKeyLayerCompressedSize = []byte("compressed-size")
+
+var ffjKeyLayerUncompressedDigest = []byte("diff-digest")
+
+var ffjKeyLayerUncompressedSize = []byte("diff-size")
+
+var ffjKeyLayerCompressionType = []byte("compression")
+
+var ffjKeyLayerFlags = []byte("flags")
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *Layer) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtLayerbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'd':
+
+ if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtLayerID:
+ goto handle_ID
+
+ case ffjtLayerNames:
+ goto handle_Names
+
+ case ffjtLayerParent:
+ goto handle_Parent
+
+ case ffjtLayerMetadata:
+ goto handle_Metadata
+
+ case ffjtLayerMountLabel:
+ goto handle_MountLabel
+
+ case ffjtLayerCreated:
+ goto handle_Created
+
+ case ffjtLayerCompressedDigest:
+ goto handle_CompressedDigest
+
+ case ffjtLayerCompressedSize:
+ goto handle_CompressedSize
+
+ case ffjtLayerUncompressedDigest:
+ goto handle_UncompressedDigest
+
+ case ffjtLayerUncompressedSize:
+ goto handle_UncompressedSize
+
+ case ffjtLayerCompressionType:
+ goto handle_CompressionType
+
+ case ffjtLayerFlags:
+ goto handle_Flags
+
+ case ffjtLayernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Names:
+
+ /* handler: j.Names type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Names = nil
+ } else {
+
+ j.Names = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJNames = string(string(outBuf))
+
+ }
+ }
+
+ j.Names = append(j.Names, tmpJNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Parent:
+
+ /* handler: j.Parent type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Parent = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Metadata:
+
+ /* handler: j.Metadata type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Metadata = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountLabel:
+
+ /* handler: j.MountLabel type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountLabel = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Created:
+
+ /* handler: j.Created type=time.Time kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = j.Created.UnmarshalJSON(tbuf)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedDigest:
+
+ /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.CompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedSize:
+
+ /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedDigest:
+
+ /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.UncompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedSize:
+
+ /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.UncompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressionType:
+
+ /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressionType = archive.Compression(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Flags:
+
+ /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Flags = nil
+ } else {
+
+ j.Flags = make(map[string]interface{}, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJFlags interface{}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/
+
+ {
+ /* Falling back. type=interface {} kind=interface */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJFlags)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.Flags[k] = tmpJFlags
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshals bytes to json - template
+func (j *layerMountPoint) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals buf to json - template
+func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteString(`,"path":`)
+ fflib.WriteJsonString(buf, string(j.MountPoint))
+ buf.WriteString(`,"count":`)
+ fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtlayerMountPointbase = iota
+ ffjtlayerMountPointnosuchkey
+
+ ffjtlayerMountPointID
+
+ ffjtlayerMountPointMountPoint
+
+ ffjtlayerMountPointMountCount
+)
+
+var ffjKeylayerMountPointID = []byte("id")
+
+var ffjKeylayerMountPointMountPoint = []byte("path")
+
+var ffjKeylayerMountPointMountCount = []byte("count")
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *layerMountPoint) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerMountPointbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerMountPointID:
+ goto handle_ID
+
+ case ffjtlayerMountPointMountPoint:
+ goto handle_MountPoint
+
+ case ffjtlayerMountPointMountCount:
+ goto handle_MountCount
+
+ case ffjtlayerMountPointnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountPoint:
+
+ /* handler: j.MountPoint type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountPoint = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountCount:
+
+ /* handler: j.MountCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.MountCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshals bytes to json - template
+func (j *layerStore) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals buf to json - template
+func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtlayerStorebase = iota
+ ffjtlayerStorenosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *layerStore) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerStorebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerStorenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshals bytes to json - template
+func (j *simpleGetCloser) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals buf to json - template
+func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtsimpleGetCloserbase = iota
+ ffjtsimpleGetClosernosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *simpleGetCloser) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtsimpleGetCloserbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtsimpleGetClosernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go
new file mode 100644
index 000000000..41ee9017a
--- /dev/null
+++ b/vendor/github.com/containers/storage/lockfile.go
@@ -0,0 +1,184 @@
+package storage
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+// A Locker represents a file lock where the file is used to cache an
+// identifier of the last party that made changes to whatever's being protected
+// by the lock.
+type Locker interface {
+ sync.Locker
+
+ // Touch records, for others sharing the lock, that the caller was the
+ // last writer. It should only be called with the lock held.
+ Touch() error
+
+ // Modified() checks if the most recent writer was a party other than the
+ // last recorded writer. It should only be called with the lock held.
+ Modified() (bool, error)
+
+ // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
+ TouchedSince(when time.Time) bool
+
+ // IsReadWrite() checks if the lock file is read-write
+ IsReadWrite() bool
+}
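+
+// A minimal usage sketch for a Locker (a hypothetical caller, not part of
+// this package); the lock file path is made up for illustration:
+//
+//	lock, err := GetLockfile("/var/lib/storage/storage.lock")
+//	if err != nil {
+//		return err
+//	}
+//	lock.Lock()
+//	defer lock.Unlock()
+//	if modified, err := lock.Modified(); err == nil && modified {
+//		// another party wrote since we last looked; reload state here
+//	}
+//	// ... make changes to the protected data ...
+//	if err := lock.Touch(); err != nil {
+//		return err
+//	}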
+
+type lockfile struct {
+ mu sync.Mutex
+ file string
+ fd uintptr
+ lw string
+ locktype int16
+}
+
+var (
+ lockfiles map[string]*lockfile
+ lockfilesLock sync.Mutex
+)
+
+// GetLockfile opens a read-write lock file, creating it if necessary. The
+// Locker object it returns is initially unlocked.
+func GetLockfile(path string) (Locker, error) {
+ lockfilesLock.Lock()
+ defer lockfilesLock.Unlock()
+ if lockfiles == nil {
+ lockfiles = make(map[string]*lockfile)
+ }
+ cleanPath := filepath.Clean(path)
+ if locker, ok := lockfiles[cleanPath]; ok {
+ if !locker.IsReadWrite() {
+ return nil, errors.Wrapf(ErrLockReadOnly, "lock %q is a read-only lock", cleanPath)
+ }
+ return locker, nil
+ }
+ fd, err := unix.Open(cleanPath, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening %q", cleanPath)
+ }
+ unix.CloseOnExec(fd)
+ locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}
+ lockfiles[filepath.Clean(path)] = locker
+ return locker, nil
+}
+
+// GetROLockfile opens a read-only lock file. The Locker object it returns
+// is initially unlocked.
+func GetROLockfile(path string) (Locker, error) {
+ lockfilesLock.Lock()
+ defer lockfilesLock.Unlock()
+ if lockfiles == nil {
+ lockfiles = make(map[string]*lockfile)
+ }
+ cleanPath := filepath.Clean(path)
+ if locker, ok := lockfiles[cleanPath]; ok {
+ if locker.IsReadWrite() {
+ return nil, fmt.Errorf("lock %q is a read-write lock", cleanPath)
+ }
+ return locker, nil
+ }
+ fd, err := unix.Open(cleanPath, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening %q", cleanPath)
+ }
+ unix.CloseOnExec(fd)
+ locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}
+ lockfiles[filepath.Clean(path)] = locker
+ return locker, nil
+}
+
+// Lock locks the lock file
+func (l *lockfile) Lock() {
+ lk := unix.Flock_t{
+ Type: l.locktype,
+ Whence: int16(os.SEEK_SET),
+ Start: 0,
+ Len: 0,
+ Pid: int32(os.Getpid()),
+ }
+ l.mu.Lock()
+ for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
+ time.Sleep(10 * time.Millisecond)
+ }
+}
+
+// Unlock unlocks the lock file
+func (l *lockfile) Unlock() {
+ lk := unix.Flock_t{
+ Type: unix.F_UNLCK,
+ Whence: int16(os.SEEK_SET),
+ Start: 0,
+ Len: 0,
+ Pid: int32(os.Getpid()),
+ }
+ for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
+ time.Sleep(10 * time.Millisecond)
+ }
+ l.mu.Unlock()
+}
+
+// Touch records the caller as the last writer by writing a fresh random ID to the lock file
+func (l *lockfile) Touch() error {
+ l.lw = stringid.GenerateRandomID()
+ id := []byte(l.lw)
+ _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
+ if err != nil {
+ return err
+ }
+ n, err := unix.Write(int(l.fd), id)
+ if err != nil {
+ return err
+ }
+ if n != len(id) {
+ return unix.ENOSPC
+ }
+ err = unix.Fsync(int(l.fd))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Modified indicates if the lock file has been updated since the last time it was loaded
+func (l *lockfile) Modified() (bool, error) {
+ id := []byte(l.lw)
+ _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
+ if err != nil {
+ return true, err
+ }
+ n, err := unix.Read(int(l.fd), id)
+ if err != nil {
+ return true, err
+ }
+ if n != len(id) {
+ return true, unix.ENOSPC
+ }
+ lw := l.lw
+ l.lw = string(id)
+ return l.lw != lw, nil
+}
+
+// TouchedSince indicates if the lock file has been touched since the specified time
+func (l *lockfile) TouchedSince(when time.Time) bool {
+ st := unix.Stat_t{}
+ err := unix.Fstat(int(l.fd), &st)
+ if err != nil {
+ return true
+ }
+ touched := time.Unix(statTMtimeUnix(st))
+ return when.Before(touched)
+}
+
+// IsReadWrite indicates if the lock file is a read-write lock
+func (l *lockfile) IsReadWrite() bool {
+ return (l.locktype == unix.F_WRLCK)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
new file mode 100644
index 000000000..abee36f7e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -0,0 +1,1251 @@
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/pools"
+ "github.com/containers/storage/pkg/promise"
+ "github.com/containers/storage/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+type (
+ // Compression represents the compression algorithm applied to a stream, if any.
+ Compression int
+ // WhiteoutFormat is the format of whiteout files to be unpacked.
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.IDPair
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ }
+)
+
+// Archiver allows the reuse of most utility functions of this package
+// with a pluggable Untar function. Also, to facilitate the passing of
+// specific id mappings for untar, an archiver can be created with maps
+// which will then be passed to Untar operations
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMappings *idtools.IDMappings
+}
+
+// NewDefaultArchiver returns a new Archiver without any IDMappings
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out of
+// the destination directory. When testing archive breakout in the unit tests,
+// this error is expected in order for the test to pass.
+type breakoutError error
+
+const (
+ // Uncompressed represents an uncompressed stream.
+ Uncompressed Compression = iota
+ // Bzip2 is the bzip2 compression algorithm.
+ Bzip2
+ // Gzip is the gzip compression algorithm.
+ Gzip
+ // Xz is the xz compression algorithm.
+ Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debug("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
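Because DetectCompression matches only on these leading magic bytes, any stream without a recognized signature falls through to Uncompressed. A small illustrative program (not part of the vendored code):

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/archive"
    )

    func main() {
        gz := []byte{0x1F, 0x8B, 0x08, 0x00} // gzip signature plus one extra byte
        fmt.Println(archive.DetectCompression(gz) == archive.Gzip)                      // true
        fmt.Println(archive.DetectCompression([]byte("plain")) == archive.Uncompressed) // true
    }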
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, chdone, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+ <-chdone
+ return readBufWrapper.Close()
+ }), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
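A usage sketch for DecompressStream (the file name is illustrative; assuming the usual imports: archive/tar, fmt, io, os, and this package). Callers never need to know the compression format up front, and closing the returned ReadCloser also waits for the xz helper process when one was spawned:

    func listEntries(path string) error {
        f, err := os.Open(path) // e.g. "layer.tar.gz" or "layer.tar.xz"
        if err != nil {
            return err
        }
        defer f.Close()
        rc, err := archive.DecompressStream(f)
        if err != nil {
            return err
        }
        defer rc.Close()
        tr := tar.NewReader(rc)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                return nil
            }
            if err != nil {
                return err
            }
            fmt.Println(hdr.Name)
        }
    }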
+
+// CompressStream compresses dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive, the TarModifierFunc will be called with the Header and
+// a reader which will return the file's content. If the file does not exist,
+// both header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
+ pipeReader, pipeWriter := io.Pipe()
+
+ go func() {
+ tarReader := tar.NewReader(inputTarStream)
+ tarWriter := tar.NewWriter(pipeWriter)
+ defer inputTarStream.Close()
+ defer tarWriter.Close()
+
+ modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
+ header, data, err := modifier(name, original, tarReader)
+ switch {
+ case err != nil:
+ return err
+ case header == nil:
+ return nil
+ }
+
+ header.Name = name
+ header.Size = int64(len(data))
+ if err := tarWriter.WriteHeader(header); err != nil {
+ return err
+ }
+ if len(data) != 0 {
+ if _, err := tarWriter.Write(data); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ var err error
+ var originalHeader *tar.Header
+ for {
+ originalHeader, err = tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+
+ modifier, ok := mods[originalHeader.Name]
+ if !ok {
+ // No modifiers for this file, copy the header and data
+ if err := tarWriter.WriteHeader(originalHeader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ continue
+ }
+ delete(mods, originalHeader.Name)
+
+ if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ // Apply the modifiers that haven't matched any files in the archive
+ for name, modifier := range mods {
+ if err := modify(name, nil, modifier, nil); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ pipeWriter.Close()
+
+ }()
+ return pipeReader
+}
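A hedged sketch of a TarModifierFunc that rewrites one entry and synthesizes it when absent; the entry name and content are illustrative. Note that the wrapper itself fills in header.Name and header.Size:

    func rewriteHostname(in io.ReadCloser) io.ReadCloser {
        mods := map[string]archive.TarModifierFunc{
            "etc/hostname": func(path string, hdr *tar.Header, _ io.Reader) (*tar.Header, []byte, error) {
                if hdr == nil { // entry missing from the input archive: synthesize one
                    hdr = &tar.Header{Typeflag: tar.TypeReg, Mode: 0644}
                }
                return hdr, []byte("demo\n"), nil
            },
        }
        return archive.ReplaceFileTarWrapper(in, mods)
    }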
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+// FileInfoHeader creates a populated Header from fi.
+// Compared to the standard archive/tar package, this function fills in more information.
+// Also, regardless of Go version, this function fills in the file type bits (e.g. hdr.Mode |= modeISDIR),
+// which were removed from archive/tar in Go 1.9.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+ if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+ return nil, err
+ }
+ return hdr, nil
+}
+
+// fillGo18FileTypeBits fills in the file type bits that were removed from archive/tar in Go 1.9
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+ fm := fi.Mode()
+ switch {
+ case fm.IsRegular():
+ mode |= modeISREG
+ case fi.IsDir():
+ mode |= modeISDIR
+ case fm&os.ModeSymlink != 0:
+ mode |= modeISLNK
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ mode |= modeISCHR
+ } else {
+ mode |= modeISBLK
+ }
+ case fm&os.ModeNamedPipe != 0:
+ mode |= modeISFIFO
+ case fm&os.ModeSocket != 0:
+ mode |= modeISSOCK
+ }
+ return mode
+}
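One way to see what fillGo18FileTypeBits restores is to compare against what tar.FileInfoHeader produces on Go 1.9 or newer. A sketch written as if inside this package (the helper and modeISDIR are unexported), e.g. in a _test.go file:

    func TestFillTypeBits(t *testing.T) {
        fi, err := os.Lstat(os.TempDir()) // a directory
        if err != nil {
            t.Fatal(err)
        }
        hdr, err := tar.FileInfoHeader(fi, "")
        if err != nil {
            t.Fatal(err)
        }
        // On Go >= 1.9, hdr.Mode carries only permission-style bits;
        // the helper puts the c_ISDIR-style type bit back:
        if fillGo18FileTypeBits(hdr.Mode, fi)&modeISDIR == 0 {
            t.Fatal("directory type bit was not restored")
        }
    }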
+
+// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
+// to a tar header
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+ return nil
+}
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IDMappings *idtools.IDMappings
+ ChownOpts *idtools.IDPair
+
+ // For packing and unpacking whiteout files in a
+ // non-standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ IDMappings: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// canonicalTarName provides a platform-independent and consistent POSIX-style
+// path for files and directories to be archived, regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links to
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write the original hdr, then
+ // replace hdr with wo to be written after it. Whiteouts should
+ // always be written after the original. Note that the converter
+ // may also have rewritten the original hdr into a whiteout, in
+ // addition to returning a separate whiteout header.
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
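The SeenFiles map above is the entire hardlink story: the first path seen for an inode carries the file content, and every later path with the same inode becomes a zero-size tar.TypeLink entry pointing back at it. A standalone sketch of just that decision:

    package main

    import "fmt"

    func main() {
        seen := map[uint64]string{} // mirrors tarAppender.SeenFiles
        classify := func(inode uint64, name string) string {
            if first, ok := seen[inode]; ok {
                return "hardlink -> " + first // emitted as tar.TypeLink with Size 0
            }
            seen[inode] = name
            return "regular file" // content stored once, under this name
        }
        fmt.Println(classify(42, "a/data")) // regular file
        fmt.Println(classify(42, "b/data")) // hardlink -> a/data
    }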
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
+ // hdr.Mode is in Linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file. We use system.OpenFileSequential to use sequential
+ // file access to avoid depleting the standby list on Windows.
+ // On Linux, this equates to a regular os.OpenFile
+ file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ return nil
+ }
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ errors = append(errors, err.Error())
+ continue
+ }
+ return err
+ }
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := newTarAppender(
+ idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ compressWriter,
+ options.ChownOpts,
+ )
+ ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = pm.Matches(relFilePath)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // If it's not a dir, we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
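A usage sketch for TarWithOptions (the paths and patterns are illustrative). Since the walk happens in the goroutine above, per-file failures are logged rather than returned; the caller only sees stream-level errors:

    func archiveApp(dst io.Writer) error {
        rc, err := archive.TarWithOptions("/srv/app", &archive.TarOptions{
            Compression:     archive.Gzip,
            ExcludePatterns: []string{"*.log", "tmp"},
        })
        if err != nil {
            return err
        }
        defer rc.Close()
        _, err = io.Copy(dst, rc)
        return err
    }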
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMappings.RootPair()
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists, we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := remapIDs(idMappings, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to prevent further
+ // file creation inside them from modifying the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
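The filepath.Rel check in Unpack is what stops path-traversal entries: after joining a cleaned entry name onto dest, any path whose dest-relative form starts with "../" has escaped the target tree. A small illustration (entry names are made up):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        dest := "/unpack"
        for _, name := range []string{"ok/file", "../evil"} {
            p := filepath.Join(dest, filepath.Clean(name))
            rel, _ := filepath.Rel(dest, p)
            escaped := strings.HasPrefix(rel, ".."+string(os.PathSeparator))
            fmt.Println(name, escaped) // "ok/file false", then "../evil true"
        }
    }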
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
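A usage sketch for Untar (the file name and destination are illustrative): compression is autodetected via DecompressStream, whereas UntarUncompressed skips that step:

    func unpackRootfs() error {
        f, err := os.Open("rootfs.tar.xz")
        if err != nil {
            return err
        }
        defer f.Close()
        return archive.Untar(f, "/var/lib/demo/rootfs", &archive.TarOptions{NoLchown: true})
    }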
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMappings.UIDs(),
+ GIDMaps: archiver.IDMappings.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars the tar archive at the path src into the destination directory dst.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMappings.UIDs(),
+ GIDMaps: archiver.IDMappings.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootIDs := archiver.IDMappings.RootPair()
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := promise.Go(func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ if err := remapIDs(archiver.IDMappings, hdr); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ })
+ defer func() {
+ if er := <-errC; err == nil && er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
+
+func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
+ ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+ hdr.Uid, hdr.Gid = ids.UID, ids.GID
+ return err
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ chdone := make(chan struct{})
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, nil, err
+ }
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(chdone)
+ }()
+
+ return pipeR, chdone, nil
+}
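A sketch of how cmdStream is consumed, written as if inside this package (the function is unexported): a non-zero exit surfaces as a read error on the pipe carrying stderr, and the channel signals that Wait has completed:

    func runFilter(compressed io.Reader) ([]byte, error) {
        out, done, err := cmdStream(exec.Command("xz", "-d", "-c", "-q"), compressed)
        if err != nil {
            return nil, err
        }
        data, err := ioutil.ReadAll(out) // a failed command turns into an error here
        <-done                           // cmd.Wait has finished in the helper goroutine
        return data, err
    }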
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
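Because Read above unlinks the backing file as soon as the final byte is consumed, a TempArchive is strictly read-once. A usage sketch with assumed io.Reader/io.Writer endpoints:

    func spoolAndSend(src io.Reader, conn io.Writer) error {
        ta, err := archive.NewTempArchive(src, "") // "" selects the default temp dir
        if err != nil {
            return err
        }
        _, err = io.Copy(conn, ta) // after the last byte, the temp file is closed and removed
        return err
    }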
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
+func IsArchive(header []byte) bool {
+ compression := DetectCompression(header)
+ if compression != Uncompressed {
+ return true
+ }
+ r := tar.NewReader(bytes.NewBuffer(header))
+ _, err := r.Next()
+ return err == nil
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+ return NewDefaultArchiver().UntarPath(src, dst)
+}
+
+const (
+ // HeaderSize is the size in bytes of a tar header
+ HeaderSize = 512
+)
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
new file mode 100644
index 000000000..5a14eb91a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -0,0 +1,92 @@
+package archive
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct{}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ if hdr.Xattrs != nil {
+ delete(hdr.Xattrs, "trusted.overlay.opaque")
+ }
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ wo = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return
+}
+
+func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ return false, err
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
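For reference, the AUFS-style names this converter reads and writes come from WhiteoutPrefix and WhiteoutOpaqueDir in this package's whiteouts.go, which is not part of this hunk; their conventional values are ".wh." and ".wh..wh..opq". A sketch computing the names, with the constants mirrored locally as an assumption:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    const (
        whiteoutPrefix    = ".wh."         // assumed value of WhiteoutPrefix
        whiteoutOpaqueDir = ".wh..wh..opq" // assumed value of WhiteoutOpaqueDir
    )

    func main() {
        dir, base := filepath.Split("etc/passwd")
        fmt.Println(filepath.Join(dir, whiteoutPrefix+base)) // etc/.wh.passwd (deleted file)
        fmt.Println(filepath.Join("etc", whiteoutOpaqueDir)) // etc/.wh..wh..opq (opaque dir marker)
    }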
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
new file mode 100644
index 000000000..54acbf285
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
new file mode 100644
index 000000000..bdc1a3d79
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -0,0 +1,122 @@
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath converts a platform-specific file path to a
+// canonical POSIX-style path for tar archival. p is a relative path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header,
+// based on the platform on which the archive is created.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ // Currently Go does not fill in the major/minor numbers
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
+ }
+ }
+
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ inode = s.Ino
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
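major and minor above decode the traditional Linux dev_t layout: the major number lives in bits 8-19, while the minor number is split between the low byte and bits 20 and up. A sketch as if inside this package:

    func TestDevNumbers(t *testing.T) {
        rdev := uint64(0x0801) // /dev/sda1: major 8, minor 1
        if major(rdev) != 8 || minor(rdev) != 1 {
            t.Fatalf("decoded %d:%d, want 8:1", major(rdev), minor(rdev))
        }
    }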
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
+
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= unix.S_IFBLK
+ case tar.TypeChar:
+ mode |= unix.S_IFCHR
+ case tar.TypeFifo:
+ mode |= unix.S_IFIFO
+ }
+
+ return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
new file mode 100644
index 000000000..0bcbb925d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -0,0 +1,79 @@
+// +build windows
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath converts a platform-specific file path to a
+// canonical POSIX-style path for tar archival. p is a relative path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert windows style relative path with backslashes
+ // into forward slashes. Since windows does not allow '/' or '\'
+ // in file names, it is mostly safe to replace; however, we must
+ // check just in case.
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header,
+// based on the platform on which the archive is created.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ permPart := perm & os.ModePerm
+ noPermPart := perm &^ os.ModePerm
+ // Add the x bit: make everything +x from windows
+ permPart |= 0111
+ permPart &= 0755
+
+ return noPermPart | permPart
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ // do nothing. no notion of Rdev, Nlink in stat on Windows
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.IDPair{0, 0}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
new file mode 100644
index 000000000..6ba4b8ec6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -0,0 +1,441 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/pools"
+ "github.com/containers/storage/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, or delete.
+// This is used for layer diffs.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count with either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
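A sketch of sameFsTime as if inside this package: a tar round-trip truncates timestamps to whole seconds, so a zero-nanosecond side compares equal, while two different non-zero nanosecond values do not:

    func TestSameFsTime(t *testing.T) {
        a := time.Unix(1500000000, 123456789)
        b := time.Unix(1500000000, 0) // what a tar round-trip hands back
        if !sameFsTime(a, b) {
            t.Fatal("zero-nanosecond side should compare equal")
        }
        if sameFsTime(a, time.Unix(1500000000, 5)) {
            t.Fatal("two different non-zero nanosecond values should differ")
        }
    }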
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the FileInfo for the given path within this tree.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions.
+ // Also, we only recurse on the old dir if the new info is a directory;
+ // otherwise any previous delete/change is considered recursive.
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+// Changes returns the list of changes between this FileInfo tree and oldInfo.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+		// In general we log errors here but ignore them, because during
+		// e.g. a diff operation the container can continue mutating the
+		// filesystem, and we can see transient errors from this.
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
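+
+// Taken together, a typical use of the API above is to diff two layer
+// directories, account for the size of the diff, and stream it as a tar
+// archive. A minimal sketch (paths are hypothetical; oldDir may be "" for
+// a brand-new layer):
+//
+//	changes, err := ChangesDirs("/layers/new", "/layers/old")
+//	if err != nil {
+//		return err
+//	}
+//	size := ChangesSize("/layers/new", changes)
+//	_ = size // e.g. recorded as the layer's diff size
+//	rc, err := ExportChanges("/layers/new", changes, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()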
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..90c9a627e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -0,0 +1,313 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/containers/storage/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on Linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on Linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save several seconds on
+// large images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
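+
+// A sketch of the pruning this enables (paths are hypothetical): if
+// /layers/old/bin/sh and /layers/new/bin/sh are hardlinks to the same
+// inode on the same device, the walker drops the name while merging the
+// two directory listings, so the file is never lstat(2)'ed and never
+// appears in either returned tree:
+//
+//	oldRoot, newRoot, err := collectFileInfoForChanges("/layers/old", "/layers/new")
+//	// on success, newRoot.Changes(oldRoot) will not mention bin/sh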
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of unix.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+		// NB: this "bytes" local shadows the imported bytes package within
+		// this loop body.
+		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+		var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+// OverlayChanges walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func OverlayChanges(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, overlayDeletedFile, nil)
+}
+
+func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ if fi.Mode()&os.ModeCharDevice != 0 {
+ s := fi.Sys().(*syscall.Stat_t)
+ if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ return path, nil
+ }
+ }
+ if fi.Mode()&os.ModeDir != 0 {
+ opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
+ if err != nil {
+ return "", err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ return path, nil
+ }
+ }
+
+ return "", nil
+}
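+
+// A sketch of what overlayDeletedFile detects (hypothetical layout):
+// deleting etc/foo in an overlay upper directory leaves a 0:0 character
+// device at etc/foo, and making etc opaque sets trusted.overlay.opaque=y
+// on the directory itself; both are treated as deletion markers here.
+//
+//	p, err := overlayDeletedFile("/var/lib/overlay/upper", "etc/foo", fi)
+//	// p == "etc/foo" when fi describes the 0:0 char device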
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
new file mode 100644
index 000000000..e1d1e7a91
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/containers/storage/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..d669c01b4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
@@ -0,0 +1,37 @@
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/containers/storage/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size or mtime for dirs, they're not a good measure of change
+ (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
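+
+// For instance (a sketch): a chmod flips Mode() and is reported for files
+// and directories alike; appending to a regular file flips Size()/Mtim()
+// and is reported; but a directory whose size or mtime changed only
+// because entries were created inside it is not, by itself, different:
+//
+//	statDifferent(oldStat, newStat) // false for pure dir size/mtime drift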
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..5ad3d7e38
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mtim() != newStat.Mtim() ||
+		oldStat.Mode() != newStat.Mode() ||
+		(oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir()) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+// getIno always returns 0 on Windows; inode numbers are not tracked there.
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+// hasHardlinks always reports false on Windows; hardlink counts are not
+// tracked there.
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
new file mode 100644
index 000000000..ea012b2d9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -0,0 +1,461 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (i.e. one
+// already processed by utility functions from the path or filepath stdlib
+// packages), appending a trailing `/.` or `/` if the corresponding original
+// path (from before that processing) ended with one. If the cleaned path
+// already ends in a `.` path segment or a path separator, another is not
+// added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = normalizePath(cleanedPath)
+ originalPath = normalizePath(originalPath)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(filepath.Separator)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+ cleanedPath += string(filepath.Separator)
+ }
+
+ return cleanedPath
+}
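+
+// For example (a sketch, Unix semantics):
+//
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/.") // "/foo/bar/."
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/")  // "/foo/bar/"
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar")   // "/foo/bar"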
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+ return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+ return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename, first cleaning the path but preserving a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(normalizePath(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(filepath.Separator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
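+
+// For example (a sketch, Unix semantics):
+//
+//	SplitPathDirEntry("/foo/bar")  // "/foo", "bar"
+//	SplitPathDirEntry("/foo/bar/") // "/foo", "bar"
+//	SplitPathDirEntry("/foo/.")    // "/foo", "."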
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+ filter := []string{sourceBase}
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+ return TarWithOptions(sourceDir, &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ })
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// Normalize the file path and then evaluate the symlink; we will use
+	// the link target instead of the symlink itself if followLink is set.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Lstat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
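+
+// For example (a sketch): when copying a directory archived as "web" to a
+// destination named "site", each entry "web/..." in the stream is
+// rewritten to "site/...":
+//
+//	rebased := RebaseArchiveEntries(srcContent, "web", "site")
+//	defer rebased.Close()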
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
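+
+// A minimal sketch of the copy helpers above (paths are hypothetical):
+//
+//	// Copy /src/config into the directory /dst/, following symlinks in
+//	// the source path.
+//	if err := CopyResource("/src/config", "/dst/", true); err != nil {
+//		return err
+//	}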
+
+// ResolveHostSourcePath decides which real path to copy, depending on
+// whether symlinks should be followed. If followLink is true, resolvedPath
+// is the target of any symlink at path; otherwise only symlinks in the
+// parent directory are resolved, and a symlink at the final path element
+// is returned as-is, without resolving.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+		// When not following symlinks, still resolve any symlinks in the parent dir.
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath will have been cleaned (no trailing path separators or
+	// "." segment) so we can manually re-append them as needed.
+ var rebaseName string
+ if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
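+
+// For example (a sketch): if /foo/link is a symlink to /foo/target, then
+// after evaluating the link:
+//
+//	resolved, rebase := GetRebaseName("/foo/link", "/foo/target")
+//	// resolved == "/foo/target", rebase == "link"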
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
new file mode 100644
index 000000000..e305b5e4a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go
new file mode 100644
index 000000000..2b775b45c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
new file mode 100644
index 000000000..f93f4cb17
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -0,0 +1,256 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/pools"
+ "github.com/containers/storage/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+		// Windows does not support filenames with colons in them. Ignore
+		// these files. This is not a problem in practice: a client running
+		// docker pull against a Windows daemon would only sensibly be
+		// pulling a Windows image, not, say, an Ubuntu image whose files
+		// include colons in their names under /usr/share/man/man3. However,
+		// for development, it is necessary to be able to pull Linux images
+		// which are in the repository.
+		//
+		// TODO Windows. Once the registry is aware of which images are
+		// Windows-specific and which are Linux-specific, this warning should
+		// be changed to an error, to cover the situation where someone does
+		// manage to upload a Linux image but has it inadvertently tagged as
+		// Windows.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+		// Note: these operations are platform-specific, so the path separator must be, too.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600, "")
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+		// Note: these operations are platform-specific, so the path separator must be, too.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+			// If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := remapIDs(idMappings, srcHdr); err != nil {
+ return 0, err
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows
+// skipping the DecompressStream call.
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ if decompress {
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return UnpackLayer(dest, layer, options)
+}
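+
+// A minimal sketch of applying a layer with the helpers above
+// (layerStream is a hypothetical io.Reader holding a possibly compressed
+// layer diff):
+//
+//	size, err := ApplyLayer("/var/lib/mystore/rootfs", layerStream)
+//	if err != nil {
+//		return err
+//	}
+//	// size is the byte count of the layer's unpacked contents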
diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go
new file mode 100644
index 000000000..3448569b1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+		// Return the UTIME_OMIT special value ((1 << 30) - 2), which tells
+		// utimensat(2) to leave this timestamp unchanged.
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
new file mode 100644
index 000000000..e85aac054
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go
new file mode 100644
index 000000000..d20478a10
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix marks a file as a whiteout: a file whose name is
+// WhiteoutPrefix followed by a filename indicates that the named file has
+// been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir means the directory has been made opaque: readdir
+// calls on this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
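+
+// For example: deleting a/b in a layer is recorded as the entry "a/.wh.b"
+// in that layer's archive, and making a opaque is recorded as
+// "a/.wh..wh..opq":
+//
+//	WhiteoutPrefix + "b" // ".wh.b"
+//	WhiteoutOpaqueDir    // ".wh..wh..opq"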
diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go
new file mode 100644
index 000000000..b39d12c87
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
new file mode 100644
index 000000000..2735f1400
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -0,0 +1,70 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+)
+
+// NewArchiver returns a new Archiver which uses chrootarchive.Untar
+func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
+ if idMappings == nil {
+ idMappings = &idtools.IDMappings{}
+ }
+ return &archive.Archiver{Untar: Untar, IDMappings: idMappings}
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ if options == nil {
+ options = &archive.TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMappings.RootPair()
+
+ dest = filepath.Clean(dest)
+ if _, err := os.Stat(dest); os.IsNotExist(err) {
+ if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+ return err
+ }
+ }
+
+ r := ioutil.NopCloser(tarArchive)
+ if decompress {
+ decompressedArchive, err := archive.DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return invokeUnpack(r, dest, options)
+}
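+
+// A minimal sketch of using this package (paths are hypothetical): unpack
+// an archive under a destination directory, with the extraction sandboxed
+// by chroot so symlink and ".." traversal cannot escape dest.
+//
+//	f, err := os.Open("/tmp/layer.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	if err := Untar(f, "/var/lib/mystore/rootfs", nil); err != nil {
+//		return err
+//	}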
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
new file mode 100644
index 000000000..e04ed787c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -0,0 +1,86 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/reexec"
+)
+
+// untar is the entry-point for storage-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence there is no point sandboxing
+// through chroot and re-exec.
+func untar() {
+ runtime.LockOSThread()
+ flag.Parse()
+
+ var options *archive.TarOptions
+
+	// Read the options from the pipe "ExtraFiles".
+ if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+ fatal(err)
+ }
+
+ if err := chroot(flag.Arg(0)); err != nil {
+ fatal(err)
+ }
+
+ if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+ fatal(err)
+ }
+ // fully consume stdin in case it is zero padded
+ if _, err := flush(os.Stdin); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+
+	// We can't pass a potentially large exclude list directly via the
+	// command line because we could easily overrun the kernel's max
+	// argument/environment size when the full image list is passed (e.g.
+	// when this is used by `docker load`). We will marshal the options
+	// via a pipe to the child instead.
+ r, w, err := os.Pipe()
+ if err != nil {
+ return fmt.Errorf("Untar pipe failure: %v", err)
+ }
+
+ cmd := reexec.Command("storage-untar", dest)
+ cmd.Stdin = decompressedArchive
+
+ cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+ output := bytes.NewBuffer(nil)
+ cmd.Stdout = output
+ cmd.Stderr = output
+
+ if err := cmd.Start(); err != nil {
+ return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+ }
+	// Write the options to the pipe for the untar exec to read.
+ if err := json.NewEncoder(w).Encode(options); err != nil {
+ return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+ }
+ w.Close()
+
+ if err := cmd.Wait(); err != nil {
+		// When `xz -d -c -q | storage-untar ...` fails on the storage-untar
+		// side, we need to exhaust `xz`'s output, otherwise the `xz` side
+		// will block on the write pipe forever.
+ io.Copy(ioutil.Discard, decompressedArchive)
+
+ return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
new file mode 100644
index 000000000..93fde4220
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
@@ -0,0 +1,22 @@
+package chrootarchive
+
+import (
+ "io"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/longpath"
+)
+
+// chroot is not supported by Windows
+func chroot(path string) error {
+ return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+ dest string,
+ options *archive.TarOptions) error {
+	// Windows differs from Linux here because Windows does not support
+	// chroot, so there is no point sandboxing the unpack in a chrooted
+	// process. We call Unpack inline instead, within the daemon process.
+ return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
new file mode 100644
index 000000000..e8bd22e36
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -0,0 +1,108 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/storage/pkg/mount"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+// chroot on Linux uses pivot_root instead of chroot.
+// pivot_root takes a new root and an old root: the old root must be a
+// sub-dir of the new root, and it is where the current rootfs will reside
+// after the call to pivot_root; the new root is where the new rootfs is
+// set. The old root is removed after the call to pivot_root, so it is no
+// longer available under the new root.
+// This is similar to how libcontainer sets up a container's rootfs.
+func chroot(path string) (err error) {
+ // if the engine is running in a user namespace we need to use actual chroot
+ if rsystem.RunningInUserNS() {
+ return realChroot(path)
+ }
+ if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
+ return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
+ }
+
+ // make everything in new ns private
+ if err := mount.MakeRPrivate("/"); err != nil {
+ return err
+ }
+
+ if mounted, _ := mount.Mounted(path); !mounted {
+ if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
+ return realChroot(path)
+ }
+ }
+
+ // setup oldRoot for pivot_root
+ pivotDir, err := ioutil.TempDir(path, ".pivot_root")
+ if err != nil {
+ return fmt.Errorf("Error setting up pivot dir: %v", err)
+ }
+
+ var mounted bool
+ defer func() {
+ if mounted {
+ // make sure pivotDir is not mounted before we try to remove it
+ if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
+ if err == nil {
+ err = errCleanup
+ }
+ return
+ }
+ }
+
+ errCleanup := os.Remove(pivotDir)
+ // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
+ // because we already cleaned it up on failed pivot_root
+ if errCleanup != nil && !os.IsNotExist(errCleanup) {
+ errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
+ if err == nil {
+ err = errCleanup
+ }
+ }
+ }()
+
+ if err := unix.PivotRoot(path, pivotDir); err != nil {
+ // If pivot fails, fall back to the normal chroot after cleaning up temp dir
+ if err := os.Remove(pivotDir); err != nil {
+ return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
+ }
+ return realChroot(path)
+ }
+ mounted = true
+
+ // This is the new path for where the old root (prior to the pivot) has been moved to
+ // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
+ pivotDir = filepath.Join("/", filepath.Base(pivotDir))
+
+ if err := unix.Chdir("/"); err != nil {
+ return fmt.Errorf("Error changing to new root: %v", err)
+ }
+
+ // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
+ if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
+ return fmt.Errorf("Error making old root private after pivot: %v", err)
+ }
+
+ // Now unmount the old root so it's no longer visible from the new root
+ if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
+ return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
+ }
+ mounted = false
+
+ return nil
+}
+
+func realChroot(path string) error {
+ if err := unix.Chroot(path); err != nil {
+ return fmt.Errorf("Error after fallback to chroot: %v", err)
+ }
+ if err := unix.Chdir("/"); err != nil {
+ return fmt.Errorf("Error changing to new root after chroot: %v", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
new file mode 100644
index 000000000..f9b5dece8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux
+
+package chrootarchive
+
+import "golang.org/x/sys/unix"
+
+func chroot(path string) error {
+ if err := unix.Chroot(path); err != nil {
+ return err
+ }
+ return unix.Chdir("/")
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
new file mode 100644
index 000000000..68b8f74f7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
@@ -0,0 +1,23 @@
+package chrootarchive
+
+import (
+ "io"
+
+ "github.com/containers/storage/pkg/archive"
+)
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can only be
+// uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
+ return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
new file mode 100644
index 000000000..4369f30c9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -0,0 +1,130 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type applyLayerResponse struct {
+ LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for storage-applylayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence there is no point
+// sandboxing through chroot and re-exec.
+func applyLayer() {
+
+ var (
+ tmpDir string
+ err error
+ options *archive.TarOptions
+ )
+ runtime.LockOSThread()
+ flag.Parse()
+
+ inUserns := rsystem.RunningInUserNS()
+ if err := chroot(flag.Arg(0)); err != nil {
+ fatal(err)
+ }
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ defer system.Umask(oldmask)
+ if err != nil {
+ fatal(err)
+ }
+
+ if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+ fatal(err)
+ }
+
+ if inUserns {
+ options.InUserNS = true
+ }
+
+ if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil {
+ fatal(err)
+ }
+
+ os.Setenv("TMPDIR", tmpDir)
+ size, err := archive.UnpackLayer("/", os.Stdin, options)
+ os.RemoveAll(tmpDir)
+ if err != nil {
+ fatal(err)
+ }
+
+ encoder := json.NewEncoder(os.Stdout)
+ if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+ fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+ }
+
+ if _, err := flush(os.Stdin); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+ dest = filepath.Clean(dest)
+ if decompress {
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompressed.Close()
+
+ layer = decompressed
+ }
+ if options == nil {
+ options = &archive.TarOptions{}
+ if rsystem.RunningInUserNS() {
+ options.InUserNS = true
+ }
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ data, err := json.Marshal(options)
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+ }
+
+ cmd := reexec.Command("storage-applyLayer", dest)
+ cmd.Stdin = layer
+ cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
+
+ outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
+ cmd.Stdout, cmd.Stderr = outBuf, errBuf
+
+ if err = cmd.Run(); err != nil {
+ return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+ }
+
+ // Stdout should be a valid JSON struct representing an applyLayerResponse.
+ response := applyLayerResponse{}
+ decoder := json.NewDecoder(outBuf)
+ if err = decoder.Decode(&response); err != nil {
+ return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+ }
+
+ return response.LayerSize, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
new file mode 100644
index 000000000..8f8e88bfb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
@@ -0,0 +1,45 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/longpath"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+ dest = filepath.Clean(dest)
+
+ // Ensure it is a Windows-style volume path
+ dest = longpath.AddPrefix(dest)
+
+ if decompress {
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompressed.Close()
+
+ layer = decompressed
+ }
+
+ tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract")
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
+ }
+
+ s, err := archive.UnpackLayer(dest, layer, nil)
+ os.RemoveAll(tmpDir)
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
+ }
+
+ return s, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
new file mode 100644
index 000000000..21cd87992
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
@@ -0,0 +1,28 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/containers/storage/pkg/reexec"
+)
+
+func init() {
+ reexec.Register("storage-applyLayer", applyLayer)
+ reexec.Register("storage-untar", untar)
+}
+
+func fatal(err error) {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+}
+
+// flush consumes and discards all remaining bytes from the reader,
+// returning the byte count and any error from the copy
+func flush(r io.Reader) (bytes int64, err error) {
+ return io.Copy(ioutil.Discard, r)
+}
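
The same registration pattern works for any handler that should run in a re-exec'd child; a hedged sketch with a hypothetical handler name (reexec.Register must run before reexec.Init is reached in main):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Runs in the child process when the binary is re-invoked
	// with argv[0] == "my-sandboxed-step".
	reexec.Register("my-sandboxed-step", func() {
		fmt.Println("hello from the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		return // we were the re-exec'd child
	}

	cmd := reexec.Command("my-sandboxed-step")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
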
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go
new file mode 100644
index 000000000..fa17c9bf8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go
@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
new file mode 100644
index 000000000..6a0ac2464
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
@@ -0,0 +1,821 @@
+// +build linux,cgo
+
+package devicemapper
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// Same as DM_DEVICE_* enum values from libdevmapper.h
+// nolint: deadcode
+const (
+ deviceCreate TaskType = iota
+ deviceReload
+ deviceRemove
+ deviceRemoveAll
+ deviceSuspend
+ deviceResume
+ deviceInfo
+ deviceDeps
+ deviceRename
+ deviceVersion
+ deviceStatus
+ deviceTable
+ deviceWaitevent
+ deviceList
+ deviceClear
+ deviceMknodes
+ deviceListVersions
+ deviceTargetMsg
+ deviceSetGeometry
+)
+
+const (
+ addNodeOnResume AddNodeType = iota
+ addNodeOnCreate
+)
+
+// List of errors returned when using devicemapper.
+var (
+ ErrTaskRun = errors.New("dm_task_run failed")
+ ErrTaskSetName = errors.New("dm_task_set_name failed")
+ ErrTaskSetMessage = errors.New("dm_task_set_message failed")
+ ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
+ ErrTaskSetRo = errors.New("dm_task_set_ro failed")
+ ErrTaskAddTarget = errors.New("dm_task_add_target failed")
+ ErrTaskSetSector = errors.New("dm_task_set_sector failed")
+ ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
+ ErrTaskGetInfo = errors.New("dm_task_get_info failed")
+ ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
+ ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
+ ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
+ ErrNilCookie = errors.New("cookie ptr can't be nil")
+ ErrGetBlockSize = errors.New("Can't get block size")
+ ErrUdevWait = errors.New("wait on udev cookie failed")
+ ErrSetDevDir = errors.New("dm_set_dev_dir failed")
+ ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
+ ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove")
+ ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
+ ErrInvalidAddNode = errors.New("Invalid AddNode type")
+ ErrBusy = errors.New("Device is Busy")
+ ErrDeviceIDExists = errors.New("Device Id Exists")
+ ErrEnxio = errors.New("No such device or address")
+)
+
+var (
+ dmSawBusy bool
+ dmSawExist bool
+ dmSawEnxio bool // No Such Device or Address
+)
+
+type (
+	// Task represents a devicemapper task (like lvcreate, etc.); a task is needed for each ioctl
+ // command to execute.
+ Task struct {
+ unmanaged *cdmTask
+ }
+ // Deps represents dependents (layer) of a device.
+ Deps struct {
+ Count uint32
+ Filler uint32
+ Device []uint64
+ }
+ // Info represents information about a device.
+ Info struct {
+ Exists int
+ Suspended int
+ LiveTable int
+ InactiveTable int
+ OpenCount int32
+ EventNr uint32
+ Major uint32
+ Minor uint32
+ ReadOnly int
+ TargetCount int32
+ DeferredRemove int
+ }
+ // TaskType represents a type of task
+ TaskType int
+ // AddNodeType represents a type of node to be added
+ AddNodeType int
+)
+
+// DeviceIDExists returns whether the error indicates that a device id is
+// already in use. This is true when a device or snapshot creation operation
+// fails because the device or snap device already exists in the pool.
+// The current implementation is a little crude, as it compares the error
+// string against an exact pattern; replacing it with a more robust
+// implementation is desirable.
+func DeviceIDExists(err error) bool {
+ return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists)
+}
+
+func (t *Task) destroy() {
+ if t != nil {
+ DmTaskDestroy(t.unmanaged)
+ runtime.SetFinalizer(t, nil)
+ }
+}
+
+// TaskCreateNamed is a convenience function for TaskCreate when a name
+// will be set on the task as well
+func TaskCreateNamed(t TaskType, name string) (*Task, error) {
+ task := TaskCreate(t)
+ if task == nil {
+ return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t))
+ }
+ if err := task.setName(name); err != nil {
+ return nil, fmt.Errorf("devicemapper: Can't set task name %s", name)
+ }
+ return task, nil
+}
+
+// TaskCreate initializes a devicemapper task of tasktype
+func TaskCreate(tasktype TaskType) *Task {
+ Ctask := DmTaskCreate(int(tasktype))
+ if Ctask == nil {
+ return nil
+ }
+ task := &Task{unmanaged: Ctask}
+ runtime.SetFinalizer(task, (*Task).destroy)
+ return task
+}
+
+func (t *Task) run() error {
+ if res := DmTaskRun(t.unmanaged); res != 1 {
+ return ErrTaskRun
+ }
+ runtime.KeepAlive(t)
+ return nil
+}
+
+func (t *Task) setName(name string) error {
+ if res := DmTaskSetName(t.unmanaged, name); res != 1 {
+ return ErrTaskSetName
+ }
+ return nil
+}
+
+func (t *Task) setMessage(message string) error {
+ if res := DmTaskSetMessage(t.unmanaged, message); res != 1 {
+ return ErrTaskSetMessage
+ }
+ return nil
+}
+
+func (t *Task) setSector(sector uint64) error {
+ if res := DmTaskSetSector(t.unmanaged, sector); res != 1 {
+ return ErrTaskSetSector
+ }
+ return nil
+}
+
+func (t *Task) setCookie(cookie *uint, flags uint16) error {
+ if cookie == nil {
+ return ErrNilCookie
+ }
+ if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 {
+ return ErrTaskSetCookie
+ }
+ return nil
+}
+
+func (t *Task) setAddNode(addNode AddNodeType) error {
+ if addNode != addNodeOnResume && addNode != addNodeOnCreate {
+ return ErrInvalidAddNode
+ }
+ if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 {
+ return ErrTaskSetAddNode
+ }
+ return nil
+}
+
+func (t *Task) setRo() error {
+ if res := DmTaskSetRo(t.unmanaged); res != 1 {
+ return ErrTaskSetRo
+ }
+ return nil
+}
+
+func (t *Task) addTarget(start, size uint64, ttype, params string) error {
+ if res := DmTaskAddTarget(t.unmanaged, start, size,
+ ttype, params); res != 1 {
+ return ErrTaskAddTarget
+ }
+ return nil
+}
+
+func (t *Task) getDeps() (*Deps, error) {
+ var deps *Deps
+ if deps = DmTaskGetDeps(t.unmanaged); deps == nil {
+ return nil, ErrTaskGetDeps
+ }
+ return deps, nil
+}
+
+func (t *Task) getInfo() (*Info, error) {
+ info := &Info{}
+ if res := DmTaskGetInfo(t.unmanaged, info); res != 1 {
+ return nil, ErrTaskGetInfo
+ }
+ return info, nil
+}
+
+func (t *Task) getInfoWithDeferred() (*Info, error) {
+ info := &Info{}
+ if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 {
+ return nil, ErrTaskGetInfo
+ }
+ return info, nil
+}
+
+func (t *Task) getDriverVersion() (string, error) {
+ res := DmTaskGetDriverVersion(t.unmanaged)
+ if res == "" {
+ return "", ErrTaskGetDriverVersion
+ }
+ return res, nil
+}
+
+func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
+ length uint64, targetType string, params string) {
+
+ return DmGetNextTarget(t.unmanaged, next, &start, &length,
+ &targetType, &params),
+ start, length, targetType, params
+}
+
+// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
+func UdevWait(cookie *uint) error {
+ if res := DmUdevWait(*cookie); res != 1 {
+ logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
+ return ErrUdevWait
+ }
+ return nil
+}
+
+// SetDevDir sets the dev folder for the device mapper library (usually /dev).
+func SetDevDir(dir string) error {
+ if res := DmSetDevDir(dir); res != 1 {
+ logrus.Debug("devicemapper: Error dm_set_dev_dir")
+ return ErrSetDevDir
+ }
+ return nil
+}
+
+// GetLibraryVersion returns the device mapper library version.
+func GetLibraryVersion() (string, error) {
+ var version string
+ if res := DmGetLibraryVersion(&version); res != 1 {
+ return "", ErrGetLibraryVersion
+ }
+ return version, nil
+}
+
+// UdevSyncSupported returns whether device-mapper is able to sync with udev
+//
+// This is essential otherwise race conditions can arise where both udev and
+// device-mapper attempt to create and destroy devices.
+func UdevSyncSupported() bool {
+ return DmUdevGetSyncSupport() != 0
+}
+
+// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
+// The return bool indicates the state of whether the sync is enabled.
+func UdevSetSyncSupport(enable bool) bool {
+ if enable {
+ DmUdevSetSyncSupport(1)
+ } else {
+ DmUdevSetSyncSupport(0)
+ }
+
+ return UdevSyncSupported()
+}
+
+// CookieSupported returns whether the version of device-mapper supports the
+// use of cookies in tasks.
+// This is largely a lower level call that other functions use.
+func CookieSupported() bool {
+ return DmCookieSupported() != 0
+}
+
+// RemoveDevice is a useful helper for cleaning up a device.
+func RemoveDevice(name string) error {
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ cookie := new(uint)
+ if err := task.setCookie(cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+ }
+ defer UdevWait(cookie)
+
+ dmSawBusy = false // reset before the task is run
+ dmSawEnxio = false
+ if err = task.run(); err != nil {
+ if dmSawBusy {
+ return ErrBusy
+ }
+ if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
+ }
+
+ return nil
+}
+
+// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
+func RemoveDeviceDeferred(name string) error {
+ logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
+ defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
+ return ErrTaskDeferredRemove
+ }
+
+ // set a task cookie and disable library fallback, or else libdevmapper will
+ // disable udev dm rules and delete the symlink under /dev/mapper by itself,
+ // even if the removal is deferred by the kernel.
+ cookie := new(uint)
+	var flags uint16 = DmUdevDisableLibraryFallback
+ if err := task.setCookie(cookie, flags); err != nil {
+ return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+ }
+
+	// libdevmapper and udev rely on System V semaphores for synchronization;
+	// semaphores created in `task.setCookie` are cleaned up in `UdevWait`.
+	// These two calls must therefore come in pairs, otherwise semaphores will
+	// leak until the limit defined in `/proc/sys/kernel/sem` is reached,
+	// which will eventually make all following calls to `task.setCookie`
+	// fail.
+	// This call will not wait for the deferred removal to actually complete,
+	// since no udev event will be generated and the semaphore's value will
+	// not be incremented by udev; here UdevWait merely cleans up the semaphore.
+ defer UdevWait(cookie)
+
+ dmSawEnxio = false
+ if err = task.run(); err != nil {
+ if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
+ }
+
+ return nil
+}
+
+// CancelDeferredRemove cancels a deferred remove for a device.
+func CancelDeferredRemove(deviceName string) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+	if err := task.setMessage("@cancel_deferred_remove"); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawBusy = false
+ dmSawEnxio = false
+ if err := task.run(); err != nil {
+ // A device might be being deleted already
+ if dmSawBusy {
+ return ErrBusy
+ } else if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
+
+ }
+ return nil
+}
+
+// GetBlockDeviceSize returns the size of a block device identified by the specified file.
+func GetBlockDeviceSize(file *os.File) (uint64, error) {
+ size, err := ioctlBlkGetSize64(file.Fd())
+ if err != nil {
+ logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
+ return 0, ErrGetBlockSize
+ }
+ return uint64(size), nil
+}
+
+// BlockDeviceDiscard runs discard for the given path.
+// This is used as a workaround for the kernel not discarding blocks on
+// the thin pool when we remove a thin device, so we do it manually.
+func BlockDeviceDiscard(path string) error {
+ file, err := os.OpenFile(path, os.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ size, err := GetBlockDeviceSize(file)
+ if err != nil {
+ return err
+ }
+
+ if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
+ return err
+ }
+
+ // Without this sometimes the remove of the device that happens after
+ // discard fails with EBUSY.
+ unix.Sync()
+
+ return nil
+}
+
+// CreatePool is the programmatic example of "dmsetup create".
+// It creates a device with the specified poolName, data and metadata file and block size.
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+ task, err := TaskCreateNamed(deviceCreate, poolName)
+ if task == nil {
+ return err
+ }
+
+ size, err := GetBlockDeviceSize(dataFile)
+ if err != nil {
+ return fmt.Errorf("devicemapper: Can't get data size %s", err)
+ }
+
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+ if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+
+ cookie := new(uint)
+	var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+ if err := task.setCookie(cookie, flags); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+ defer UdevWait(cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
+ }
+
+ return nil
+}
+
+// ReloadPool is the programmatic example of "dmsetup reload".
+// It reloads the table with the specified poolName, data and metadata file and block size.
+func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+ task, err := TaskCreateNamed(deviceReload, poolName)
+ if task == nil {
+ return err
+ }
+
+ size, err := GetBlockDeviceSize(dataFile)
+ if err != nil {
+ return fmt.Errorf("devicemapper: Can't get data size %s", err)
+ }
+
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+ if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running ReloadPool %s", err)
+ }
+
+ return nil
+}
+
+// GetDeps is the programmatic example of "dmsetup deps".
+// It outputs a list of devices referenced by the live table for the specified device.
+func GetDeps(name string) (*Deps, error) {
+ task, err := TaskCreateNamed(deviceDeps, name)
+ if task == nil {
+ return nil, err
+ }
+ if err := task.run(); err != nil {
+ return nil, err
+ }
+ return task.getDeps()
+}
+
+// GetInfo is the programmatic example of "dmsetup info".
+// It outputs some brief information about the device.
+func GetInfo(name string) (*Info, error) {
+ task, err := TaskCreateNamed(deviceInfo, name)
+ if task == nil {
+ return nil, err
+ }
+ if err := task.run(); err != nil {
+ return nil, err
+ }
+ return task.getInfo()
+}
+
+// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred.
+// It outputs some brief information about the device.
+func GetInfoWithDeferred(name string) (*Info, error) {
+ task, err := TaskCreateNamed(deviceInfo, name)
+ if task == nil {
+ return nil, err
+ }
+ if err := task.run(); err != nil {
+ return nil, err
+ }
+ return task.getInfoWithDeferred()
+}
+
+// GetDriverVersion is the programmatic example of "dmsetup version".
+// It outputs version information of the driver.
+func GetDriverVersion() (string, error) {
+ task := TaskCreate(deviceVersion)
+ if task == nil {
+ return "", fmt.Errorf("devicemapper: Can't create deviceVersion task")
+ }
+ if err := task.run(); err != nil {
+ return "", err
+ }
+ return task.getDriverVersion()
+}
+
+// GetStatus is the programmatic example of "dmsetup status".
+// It outputs status information for the specified device name.
+func GetStatus(name string) (uint64, uint64, string, string, error) {
+ task, err := TaskCreateNamed(deviceStatus, name)
+ if task == nil {
+ logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err)
+ return 0, 0, "", "", err
+ }
+ if err := task.run(); err != nil {
+ logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err)
+ return 0, 0, "", "", err
+ }
+
+ devinfo, err := task.getInfo()
+ if err != nil {
+ logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err)
+ return 0, 0, "", "", err
+ }
+ if devinfo.Exists == 0 {
+ logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name)
+ return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
+ }
+
+ _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
+ return start, length, targetType, params, nil
+}
+
+// GetTable is the programmatic example for "dmsetup table".
+// It outputs the current table for the specified device name.
+func GetTable(name string) (uint64, uint64, string, string, error) {
+ task, err := TaskCreateNamed(deviceTable, name)
+ if task == nil {
+ logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err)
+ return 0, 0, "", "", err
+ }
+ if err := task.run(); err != nil {
+ logrus.Debugf("devicemapper: GetTable() Error Run: %s", err)
+ return 0, 0, "", "", err
+ }
+
+ devinfo, err := task.getInfo()
+ if err != nil {
+ logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err)
+ return 0, 0, "", "", err
+ }
+ if devinfo.Exists == 0 {
+ logrus.Debugf("devicemapper: GetTable() Non existing device %s", name)
+ return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
+ }
+
+ _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
+ return start, length, targetType, params, nil
+}
+
+// SetTransactionID sets a transaction id for the specified device name.
+func SetTransactionID(poolName string, oldID uint64, newID uint64) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err)
+ }
+ return nil
+}
+
+// SuspendDevice is the programmatic example of "dmsetup suspend".
+// It suspends the specified device.
+func SuspendDevice(name string) error {
+ task, err := TaskCreateNamed(deviceSuspend, name)
+ if task == nil {
+ return err
+ }
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err)
+ }
+ return nil
+}
+
+// ResumeDevice is the programmatic example of "dmsetup resume".
+// It un-suspends the specified device.
+func ResumeDevice(name string) error {
+ task, err := TaskCreateNamed(deviceResume, name)
+ if task == nil {
+ return err
+ }
+
+ cookie := new(uint)
+ if err := task.setCookie(cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+ defer UdevWait(cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
+ }
+
+ return nil
+}
+
+// CreateDevice creates a device with the specified poolName with the specified device id.
+func CreateDevice(poolName string, deviceID int) error {
+ logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID)
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawExist = false // reset before the task is run
+ if err := task.run(); err != nil {
+ // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
+ if dmSawExist {
+ return ErrDeviceIDExists
+ }
+
+ return fmt.Errorf("devicemapper: Error running CreateDevice %s", err)
+
+ }
+ return nil
+}
+
+// DeleteDevice deletes a device with the specified poolName with the specified device id.
+func DeleteDevice(poolName string, deviceID int) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawBusy = false
+ if err := task.run(); err != nil {
+ if dmSawBusy {
+ return ErrBusy
+ }
+ return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err)
+ }
+ return nil
+}
+
+// ActivateDevice activates the device identified by the specified
+// poolName, name and deviceID with the specified size.
+func ActivateDevice(poolName string, name string, deviceID int, size uint64) error {
+ return activateDevice(poolName, name, deviceID, size, "")
+}
+
+// ActivateDeviceWithExternal activates the device identified by the specified
+// poolName, name and deviceID with the specified size.
+func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error {
+ return activateDevice(poolName, name, deviceID, size, external)
+}
+
+func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error {
+ task, err := TaskCreateNamed(deviceCreate, name)
+ if task == nil {
+ return err
+ }
+
+ var params string
+ if len(external) > 0 {
+ params = fmt.Sprintf("%s %d %s", poolName, deviceID, external)
+ } else {
+ params = fmt.Sprintf("%s %d", poolName, deviceID)
+ }
+ if err := task.addTarget(0, size/512, "thin", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+ if err := task.setAddNode(addNodeOnCreate); err != nil {
+ return fmt.Errorf("devicemapper: Can't add node %s", err)
+ }
+
+ cookie := new(uint)
+ if err := task.setCookie(cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+
+ defer UdevWait(cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
+ }
+
+ return nil
+}
+
+// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active.
+func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawExist = false // reset before the task is run
+ if err := task.run(); err != nil {
+ // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
+ if dmSawExist {
+ return ErrDeviceIDExists
+ }
+ return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
+ }
+
+ return nil
+}
+
+// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceID.
+func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
+ devinfo, _ := GetInfo(baseName)
+ doSuspend := devinfo != nil && devinfo.Exists != 0
+
+ if doSuspend {
+ if err := SuspendDevice(baseName); err != nil {
+ return err
+ }
+ }
+
+ if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
+ if doSuspend {
+ if err2 := ResumeDevice(baseName); err2 != nil {
+ return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+ }
+ }
+ return err
+ }
+
+ if doSuspend {
+ if err := ResumeDevice(baseName); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
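
The dmSawExist flag and ErrDeviceIDExists exist so that callers can probe for a free device id; a hedged sketch of that retry loop (the pool name and id bound are hypothetical):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/devicemapper"
)

// createFirstFreeDevice walks device ids until create_thin succeeds,
// treating ErrDeviceIDExists as "taken, try the next id".
func createFirstFreeDevice(poolName string, maxID int) (int, error) {
	for id := 0; id < maxID; id++ {
		err := devicemapper.CreateDevice(poolName, id)
		if err == nil {
			return id, nil
		}
		if devicemapper.DeviceIDExists(err) {
			continue // id taken, try the next one
		}
		return -1, err // a real failure, not an id collision
	}
	return -1, fmt.Errorf("no free device id below %d in pool %s", maxID, poolName)
}
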
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
new file mode 100644
index 000000000..b540281fa
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
@@ -0,0 +1,121 @@
+// +build linux,cgo
+
+package devicemapper
+
+import "C"
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+)
+
+// DevmapperLogger defines the methods required to register as a callback
+// for logging events received from devicemapper. Note that devicemapper
+// sends *all* logs to callbacks regardless of level (including debug logs),
+// so implementations should take care not to spam the console with them.
+type DevmapperLogger interface {
+ // DMLog is the logging callback containing all of the information from
+ // devicemapper. The interface is identical to the C libdm counterpart.
+ DMLog(level int, file string, line int, dmError int, message string)
+}
+
+// dmLogger is the current logger to which devicemapper messages are forwarded.
+var dmLogger DevmapperLogger
+
+// LogInit changes the logging callback called after processing libdm logs for
+// error message information. The default logger simply forwards all logs to
+// logrus. Calling LogInit(nil) disables the calling of callbacks.
+func LogInit(logger DevmapperLogger) {
+ dmLogger = logger
+}
+
+// Due to the way cgo works this has to be in a separate file, as devmapper.go has
+// definitions in the cgo block, which is incompatible with using "//export"
+
+// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
+// because we are using callbacks, this function will be called for *every* log
+// in libdm (even debug ones because there's no way of setting the verbosity
+// level for an external logging callback).
+//export StorageDevmapperLogCallback
+func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
+ msg := C.GoString(message)
+
+ // Track what errno libdm saw, because the library only gives us 0 or 1.
+ if level < LogLevelDebug {
+ if strings.Contains(msg, "busy") {
+ dmSawBusy = true
+ }
+
+ if strings.Contains(msg, "File exists") {
+ dmSawExist = true
+ }
+
+ if strings.Contains(msg, "No such device or address") {
+ dmSawEnxio = true
+ }
+ }
+
+ if dmLogger != nil {
+ dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
+ }
+}
+
+// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
+// all logs that are of higher or equal priority to the given level to the
+// corresponding logrus level.
+type DefaultLogger struct {
+ // Level corresponds to the highest libdm level that will be forwarded to
+ // logrus. In order to change this, register a new DefaultLogger.
+ Level int
+}
+
+// DMLog is the logging callback containing all of the information from
+// devicemapper. The interface is identical to the C libdm counterpart.
+func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
+ if level <= l.Level {
+ // Forward the log to the correct logrus level, if allowed by dmLogLevel.
+ logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+ switch level {
+ case LogLevelFatal, LogLevelErr:
+ logrus.Error(logMsg)
+ case LogLevelWarn:
+ logrus.Warn(logMsg)
+ case LogLevelNotice, LogLevelInfo:
+ logrus.Info(logMsg)
+ case LogLevelDebug:
+ logrus.Debug(logMsg)
+ default:
+ // Don't drop any "unknown" levels.
+ logrus.Info(logMsg)
+ }
+ }
+}
+
+// registerLogCallback registers our own logging callback function for libdm
+// (which is StorageDevmapperLogCallback).
+//
+// Because libdm only gives us {0,1} error codes we need to parse the logs
+// produced by libdm (to set dmSawBusy and so on). Note that by registering a
+// callback using StorageDevmapperLogCallback, libdm will no longer output logs to
+// stderr so we have to log everything ourselves. None of this handling is
+// optional because we depend on log callbacks to parse the logs, and if we
+// don't forward the log information we'll be in a lot of trouble when
+// debugging things.
+func registerLogCallback() {
+ LogWithErrnoInit()
+}
+
+func init() {
+ // Use the default logger by default. We only allow LogLevelFatal by
+ // default, because internally we mask a lot of libdm errors by retrying
+ // and similar tricks. Also, libdm is very chatty and we don't want to
+ // worry users for no reason.
+ dmLogger = DefaultLogger{
+ Level: LogLevelFatal,
+ }
+
+ // Register as early as possible so we don't miss anything.
+ registerLogCallback()
+}
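
A minimal custom logger satisfying DevmapperLogger, assuming a caller that wants warnings and worse on stderr; the type name is illustrative:

package main

import (
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

type stderrLogger struct{}

// DMLog is invoked for every libdm log line, so filter by level here.
func (stderrLogger) DMLog(level int, file string, line, dmError int, message string) {
	if level <= devicemapper.LogLevelWarn {
		log.Printf("libdm(%d) %s:%d (%d): %s", level, file, line, dmError, message)
	}
}

func init() {
	// Replaces the package's DefaultLogger until LogInit is called again.
	devicemapper.LogInit(stderrLogger{})
}
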
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
new file mode 100644
index 000000000..190d83d49
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
@@ -0,0 +1,252 @@
+// +build linux,cgo
+
+package devicemapper
+
+/*
+#define _GNU_SOURCE
+#include <libdevmapper.h>
+#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
+
+static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
+{
+ char *buffer = NULL;
+ va_list ap;
+ int ret;
+
+ va_start(ap, f);
+ ret = vasprintf(&buffer, f, ap);
+ va_end(ap);
+ if (ret < 0) {
+ // memory allocation failed -- should never happen?
+ return;
+ }
+
+ StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
+ free(buffer);
+}
+
+static void log_with_errno_init()
+{
+ dm_log_with_errno_init(log_cb);
+}
+*/
+import "C"
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type (
+ cdmTask C.struct_dm_task
+)
+
+// IOCTL consts
+const (
+ BlkGetSize64 = C.BLKGETSIZE64
+ BlkDiscard = C.BLKDISCARD
+)
+
+// Devicemapper cookie flags.
+const (
+ DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
+ DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG
+ DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG
+ DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK
+)
+
+// DeviceMapper mapped functions.
+var (
+ DmGetLibraryVersion = dmGetLibraryVersionFct
+ DmGetNextTarget = dmGetNextTargetFct
+ DmSetDevDir = dmSetDevDirFct
+ DmTaskAddTarget = dmTaskAddTargetFct
+ DmTaskCreate = dmTaskCreateFct
+ DmTaskDestroy = dmTaskDestroyFct
+ DmTaskGetDeps = dmTaskGetDepsFct
+ DmTaskGetInfo = dmTaskGetInfoFct
+ DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
+ DmTaskRun = dmTaskRunFct
+ DmTaskSetAddNode = dmTaskSetAddNodeFct
+ DmTaskSetCookie = dmTaskSetCookieFct
+ DmTaskSetMessage = dmTaskSetMessageFct
+ DmTaskSetName = dmTaskSetNameFct
+ DmTaskSetRo = dmTaskSetRoFct
+ DmTaskSetSector = dmTaskSetSectorFct
+ DmUdevWait = dmUdevWaitFct
+ DmUdevSetSyncSupport = dmUdevSetSyncSupportFct
+ DmUdevGetSyncSupport = dmUdevGetSyncSupportFct
+ DmCookieSupported = dmCookieSupportedFct
+ LogWithErrnoInit = logWithErrnoInitFct
+ DmTaskDeferredRemove = dmTaskDeferredRemoveFct
+ DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct
+)
+
+func free(p *C.char) {
+ C.free(unsafe.Pointer(p))
+}
+
+func dmTaskDestroyFct(task *cdmTask) {
+ C.dm_task_destroy((*C.struct_dm_task)(task))
+}
+
+func dmTaskCreateFct(taskType int) *cdmTask {
+ return (*cdmTask)(C.dm_task_create(C.int(taskType)))
+}
+
+func dmTaskRunFct(task *cdmTask) int {
+ ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
+ return int(ret)
+}
+
+func dmTaskSetNameFct(task *cdmTask, name string) int {
+ Cname := C.CString(name)
+ defer free(Cname)
+
+ return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
+}
+
+func dmTaskSetMessageFct(task *cdmTask, message string) int {
+ Cmessage := C.CString(message)
+ defer free(Cmessage)
+
+ return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
+}
+
+func dmTaskSetSectorFct(task *cdmTask, sector uint64) int {
+ return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
+}
+
+func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int {
+ cCookie := C.uint32_t(*cookie)
+ defer func() {
+ *cookie = uint(cCookie)
+ }()
+ return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
+}
+
+func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int {
+ return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
+}
+
+func dmTaskSetRoFct(task *cdmTask) int {
+ return int(C.dm_task_set_ro((*C.struct_dm_task)(task)))
+}
+
+func dmTaskAddTargetFct(task *cdmTask,
+ start, size uint64, ttype, params string) int {
+
+ Cttype := C.CString(ttype)
+ defer free(Cttype)
+
+ Cparams := C.CString(params)
+ defer free(Cparams)
+
+ return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
+}
+
+func dmTaskGetDepsFct(task *cdmTask) *Deps {
+ Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task))
+ if Cdeps == nil {
+ return nil
+ }
+
+ // golang issue: https://github.com/golang/go/issues/11925
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
+ Len: int(Cdeps.count),
+ Cap: int(Cdeps.count),
+ }
+ devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
+
+ deps := &Deps{
+ Count: uint32(Cdeps.count),
+ Filler: uint32(Cdeps.filler),
+ }
+ for _, device := range devices {
+ deps.Device = append(deps.Device, uint64(device))
+ }
+ return deps
+}
+
+func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
+ Cinfo := C.struct_dm_info{}
+ defer func() {
+ info.Exists = int(Cinfo.exists)
+ info.Suspended = int(Cinfo.suspended)
+ info.LiveTable = int(Cinfo.live_table)
+ info.InactiveTable = int(Cinfo.inactive_table)
+ info.OpenCount = int32(Cinfo.open_count)
+ info.EventNr = uint32(Cinfo.event_nr)
+ info.Major = uint32(Cinfo.major)
+ info.Minor = uint32(Cinfo.minor)
+ info.ReadOnly = int(Cinfo.read_only)
+ info.TargetCount = int32(Cinfo.target_count)
+ }()
+ return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
+
+func dmTaskGetDriverVersionFct(task *cdmTask) string {
+ buffer := C.malloc(128)
+ defer C.free(buffer)
+ res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
+ if res == 0 {
+ return ""
+ }
+ return C.GoString((*C.char)(buffer))
+}
+
+func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
+ var (
+ Cstart, Clength C.uint64_t
+ CtargetType, Cparams *C.char
+ )
+ defer func() {
+ *start = uint64(Cstart)
+ *length = uint64(Clength)
+ *target = C.GoString(CtargetType)
+ *params = C.GoString(Cparams)
+ }()
+
+ nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
+ return nextp
+}
+
+func dmUdevSetSyncSupportFct(syncWithUdev int) {
+ (C.dm_udev_set_sync_support(C.int(syncWithUdev)))
+}
+
+func dmUdevGetSyncSupportFct() int {
+ return int(C.dm_udev_get_sync_support())
+}
+
+func dmUdevWaitFct(cookie uint) int {
+ return int(C.dm_udev_wait(C.uint32_t(cookie)))
+}
+
+func dmCookieSupportedFct() int {
+ return int(C.dm_cookie_supported())
+}
+
+func logWithErrnoInitFct() {
+ C.log_with_errno_init()
+}
+
+func dmSetDevDirFct(dir string) int {
+ Cdir := C.CString(dir)
+ defer free(Cdir)
+
+ return int(C.dm_set_dev_dir(Cdir))
+}
+
+func dmGetLibraryVersionFct(version *string) int {
+ buffer := C.CString(string(make([]byte, 128)))
+ defer free(buffer)
+ defer func() {
+ *version = C.GoString(buffer)
+ }()
+ return int(C.dm_get_library_version(buffer, 128))
+}
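
Because the libdm bindings are exposed as package-level function variables rather than direct C calls, in-package tests can stub them without exercising cgo. A hedged sketch of such a test (it would live in a hypothetical devmapper_test.go carrying the same linux,cgo build tags):

package devicemapper

import "testing"

func TestRunSurfacesErrTaskRun(t *testing.T) {
	orig := DmTaskRun
	defer func() { DmTaskRun = orig }()
	// Simulate dm_task_run reporting failure; libdm returns 0 on error.
	DmTaskRun = func(task *cdmTask) int { return 0 }

	task := &Task{} // the nil unmanaged pointer is never dereferenced by the stub
	if err := task.run(); err != ErrTaskRun {
		t.Fatalf("expected ErrTaskRun, got %v", err)
	}
}
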
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
new file mode 100644
index 000000000..7f793c270
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,31 @@
+// +build linux,cgo,!libdm_no_deferred_remove
+
+package devicemapper
+
+// #include <libdevmapper.h>
+import "C"
+
+// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
+const LibraryDeferredRemovalSupport = true
+
+func dmTaskDeferredRemoveFct(task *cdmTask) int {
+ return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
+}
+
+func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
+ Cinfo := C.struct_dm_info{}
+ defer func() {
+ info.Exists = int(Cinfo.exists)
+ info.Suspended = int(Cinfo.suspended)
+ info.LiveTable = int(Cinfo.live_table)
+ info.InactiveTable = int(Cinfo.inactive_table)
+ info.OpenCount = int32(Cinfo.open_count)
+ info.EventNr = uint32(Cinfo.event_nr)
+ info.Major = uint32(Cinfo.major)
+ info.Minor = uint32(Cinfo.minor)
+ info.ReadOnly = int(Cinfo.read_only)
+ info.TargetCount = int32(Cinfo.target_count)
+ info.DeferredRemove = int(Cinfo.deferred_remove)
+ }()
+ return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
new file mode 100644
index 000000000..7d8450898
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,!static_build
+
+package devicemapper
+
+// #cgo pkg-config: devmapper
+import "C"
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
new file mode 100644
index 000000000..a880fec8c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
@@ -0,0 +1,15 @@
+// +build linux,cgo,libdm_no_deferred_remove
+
+package devicemapper
+
+// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
+const LibraryDeferredRemovalSupport = false
+
+func dmTaskDeferredRemoveFct(task *cdmTask) int {
+ // Error. Nobody should be calling it.
+ return -1
+}
+
+func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
+ return -1
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
new file mode 100644
index 000000000..cf7f26a4c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,static_build
+
+package devicemapper
+
+// #cgo pkg-config: --static devmapper
+import "C"
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
new file mode 100644
index 000000000..50ea7c482
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
@@ -0,0 +1,28 @@
+// +build linux,cgo
+
+package devicemapper
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func ioctlBlkGetSize64(fd uintptr) (int64, error) {
+ var size int64
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
+ return 0, err
+ }
+ return size, nil
+}
+
+func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
+ var r [2]uint64
+ r[0] = offset
+ r[1] = length
+
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go
new file mode 100644
index 000000000..cee5e5454
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/log.go
@@ -0,0 +1,11 @@
+package devicemapper
+
+// definitions from lvm2 lib/log/log.h
+const (
+ LogLevelFatal = 2 + iota // _LOG_FATAL
+ LogLevelErr // _LOG_ERR
+ LogLevelWarn // _LOG_WARN
+ LogLevelNotice // _LOG_NOTICE
+ LogLevelInfo // _LOG_INFO
+ LogLevelDebug // _LOG_DEBUG
+)
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go
new file mode 100644
index 000000000..1715ef45d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/directory/directory.go
@@ -0,0 +1,26 @@
+package directory
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
+func MoveToSubdir(oldpath, subdir string) error {
+
+ infos, err := ioutil.ReadDir(oldpath)
+ if err != nil {
+ return err
+ }
+ for _, info := range infos {
+ if info.Name() != subdir {
+ oldName := filepath.Join(oldpath, info.Name())
+ newName := filepath.Join(oldpath, subdir, info.Name())
+ if err := os.Rename(oldName, newName); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
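
A short usage sketch with a hypothetical store root. The destination subdirectory must exist beforehand, since os.Rename will not create it, and MoveToSubdir deliberately skips the entry named after the subdir:

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/directory"
)

func main() {
	root := "/var/lib/mystore" // hypothetical store root
	if err := os.MkdirAll(filepath.Join(root, "overlay"), 0700); err != nil {
		log.Fatal(err)
	}
	// Everything directly under root, except "overlay" itself, is
	// renamed into root/overlay.
	if err := directory.MoveToSubdir(root, "overlay"); err != nil {
		log.Fatal(err)
	}
}
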
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
new file mode 100644
index 000000000..397251bdb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
@@ -0,0 +1,48 @@
+// +build linux freebsd solaris
+
+package directory
+
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// Size walks a directory tree and returns its total size in bytes.
+func Size(dir string) (size int64, err error) {
+ data := make(map[uint64]struct{})
+ err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ // if dir does not exist, Size() returns the error.
+ // if dir/x disappeared while walking, Size() ignores dir/x.
+ if os.IsNotExist(err) && d != dir {
+ return nil
+ }
+ return err
+ }
+
+ // Ignore directory sizes
+ if fileInfo == nil {
+ return nil
+ }
+
+ s := fileInfo.Size()
+ if fileInfo.IsDir() || s == 0 {
+ return nil
+ }
+
+		// Check the inode to handle hard links correctly; inode is not a
+		// uint64 on all platforms, so cast it to avoid issues.
+		inode := uint64(fileInfo.Sys().(*syscall.Stat_t).Ino)
+		if _, exists := data[inode]; exists {
+			return nil
+		}
+		data[inode] = struct{}{}
+
+ size += s
+
+ return nil
+ })
+ return
+}
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
new file mode 100644
index 000000000..6fb0917c4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package directory
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Size walks a directory tree and returns its total size in bytes.
+func Size(dir string) (size int64, err error) {
+ err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ // if dir does not exist, Size() returns the error.
+ // if dir/x disappeared while walking, Size() ignores dir/x.
+ if os.IsNotExist(err) && d != dir {
+ return nil
+ }
+ return err
+ }
+
+ // Ignore directory sizes
+ if fileInfo == nil {
+ return nil
+ }
+
+ s := fileInfo.Size()
+ if fileInfo.IsDir() || s == 0 {
+ return nil
+ }
+
+ size += s
+
+ return nil
+ })
+ return
+}
diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
new file mode 100644
index 000000000..7df7f3d43
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
@@ -0,0 +1,20 @@
+// +build linux
+
+package dmesg
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Dmesg returns the last messages from the kernel log, up to size bytes.
+func Dmesg(size int) []byte {
+ t := uintptr(3) // SYSLOG_ACTION_READ_ALL
+ b := make([]byte, size)
+ amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
+ if err != 0 {
+ return []byte{}
+ }
+ return b[:amt]
+}
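
A one-line usage sketch; reading the kernel ring buffer typically requires root (or kernel.dmesg_restrict=0), and the output is silently truncated to the requested size:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/dmesg"
)

func main() {
	fmt.Printf("%s\n", dmesg.Dmesg(256*1024)) // read up to 256 KiB
}
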
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
new file mode 100644
index 000000000..a129e654e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -0,0 +1,298 @@
+package fileutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "text/scanner"
+
+ "github.com/sirupsen/logrus"
+)
+
+// PatternMatcher allows checking paths against a list of patterns
+type PatternMatcher struct {
+ patterns []*Pattern
+ exclusions bool
+}
+
+// NewPatternMatcher creates a new matcher object for the given patterns that
+// can be used later to match paths against them
+func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
+ pm := &PatternMatcher{
+ patterns: make([]*Pattern, 0, len(patterns)),
+ }
+ for _, p := range patterns {
+ // Eliminate leading and trailing whitespace.
+ p = strings.TrimSpace(p)
+ if p == "" {
+ continue
+ }
+ p = filepath.Clean(p)
+ newp := &Pattern{}
+ if p[0] == '!' {
+ if len(p) == 1 {
+ return nil, errors.New("illegal exclusion pattern: \"!\"")
+ }
+ newp.exclusion = true
+ p = p[1:]
+ pm.exclusions = true
+ }
+ // Do some syntax checking on the pattern.
+		// filepath's Match() has some really weird, inconsistent rules, so
+		// instead of trying to duplicate its logic, just call Match() for its
+		// error state and return any error found in the pattern.
+		// If this becomes an issue we can remove this check, since it's really
+		// only needed in the error (syntax) case - which isn't critical.
+ if _, err := filepath.Match(p, "."); err != nil {
+ return nil, err
+ }
+ newp.cleanedPattern = p
+ newp.dirs = strings.Split(p, string(os.PathSeparator))
+ pm.patterns = append(pm.patterns, newp)
+ }
+ return pm, nil
+}
+
+// Matches matches path against all the patterns. Matches is not safe to be
+// called concurrently
+func (pm *PatternMatcher) Matches(file string) (bool, error) {
+ matched := false
+ file = filepath.FromSlash(file)
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+ for _, pattern := range pm.patterns {
+ negative := false
+
+ if pattern.exclusion {
+ negative = true
+ }
+
+ match, err := pattern.match(file)
+ if err != nil {
+ return false, err
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(pattern.dirs) <= len(parentPathDirs) {
+ match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
+ }
+ }
+
+ if match {
+ matched = !negative
+ }
+ }
+
+ if matched {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return matched, nil
+}
+
+// Exclusions returns true if any of the patterns define exclusions
+func (pm *PatternMatcher) Exclusions() bool {
+ return pm.exclusions
+}
+
+// Patterns returns array of active patterns
+func (pm *PatternMatcher) Patterns() []*Pattern {
+ return pm.patterns
+}
+
+// Pattern defines a single regexp used to filter file paths.
+type Pattern struct {
+ cleanedPattern string
+ dirs []string
+ regexp *regexp.Regexp
+ exclusion bool
+}
+
+func (p *Pattern) String() string {
+ return p.cleanedPattern
+}
+
+// Exclusion returns true if this pattern defines exclusion
+func (p *Pattern) Exclusion() bool {
+ return p.exclusion
+}
+
+func (p *Pattern) match(path string) (bool, error) {
+
+ if p.regexp == nil {
+ if err := p.compile(); err != nil {
+ return false, filepath.ErrBadPattern
+ }
+ }
+
+ b := p.regexp.MatchString(path)
+
+ return b, nil
+}
+
+func (p *Pattern) compile() error {
+ regStr := "^"
+ pattern := p.cleanedPattern
+ // Go through the pattern and convert it to a regexp.
+ // We use a scanner so we can support utf-8 chars.
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(pattern))
+
+ sl := string(os.PathSeparator)
+ escSL := sl
+ if sl == `\` {
+ escSL += `\`
+ }
+
+ for scan.Peek() != scanner.EOF {
+ ch := scan.Next()
+
+ if ch == '*' {
+ if scan.Peek() == '*' {
+ // is some flavor of "**"
+ scan.Next()
+
+ // Treat **/ as ** so eat the "/"
+ if string(scan.Peek()) == sl {
+ scan.Next()
+ }
+
+ if scan.Peek() == scanner.EOF {
+ // is "**EOF" - to align with .gitignore just accept all
+ regStr += ".*"
+ } else {
+ // is "**"
+ // Note that this allows for any # of /'s (even 0) because
+ // the .* will eat everything, even /'s
+ regStr += "(.*" + escSL + ")?"
+ }
+ } else {
+ // is "*" so map it to anything but "/"
+ regStr += "[^" + escSL + "]*"
+ }
+ } else if ch == '?' {
+ // "?" is any char except "/"
+ regStr += "[^" + escSL + "]"
+ } else if ch == '.' || ch == '$' {
+ // Escape some regexp special chars that have no meaning
+ // in golang's filepath.Match
+ regStr += `\` + string(ch)
+ } else if ch == '\\' {
+			// Escape the next char. Note that a trailing \ in the pattern
+			// will be left alone (but we need to escape it).
+ if sl == `\` {
+ // On windows map "\" to "\\", meaning an escaped backslash,
+ // and then just continue because filepath.Match on
+ // Windows doesn't allow escaping at all
+ regStr += escSL
+ continue
+ }
+ if scan.Peek() != scanner.EOF {
+ regStr += `\` + string(scan.Next())
+ } else {
+ regStr += `\`
+ }
+ } else {
+ regStr += string(ch)
+ }
+ }
+
+ regStr += "$"
+
+ re, err := regexp.Compile(regStr)
+ if err != nil {
+ return err
+ }
+
+ p.regexp = re
+ return nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+ pm, err := NewPatternMatcher(patterns)
+ if err != nil {
+ return false, err
+ }
+ file = filepath.Clean(file)
+
+ if file == "." {
+ // Don't let them exclude everything, kind of silly.
+ return false, nil
+ }
+
+ return pm.Matches(file)
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies src exists and removes
+// the dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+ cleanSrc := filepath.Clean(src)
+ cleanDst := filepath.Clean(dst)
+ if cleanSrc == cleanDst {
+ return 0, nil
+ }
+ sf, err := os.Open(cleanSrc)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(cleanDst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// It returns an error if the target of the symbolic link is not a directory.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go
new file mode 100644
index 000000000..ccd648fac
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go
@@ -0,0 +1,27 @@
+package fileutils
+
+import (
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// executing `lsof -p PID`
+func GetTotalUsedFds() int {
+ pid := os.Getpid()
+
+ cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return -1
+ }
+
+ outputStr := strings.TrimSpace(string(output))
+
+ fds := strings.Split(outputStr, "\n")
+
+ return len(fds) - 1
+}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go
new file mode 100644
index 000000000..0f2cb7ab9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used file descriptors.
+// Not implemented on Solaris, where these limits are per-process rather than
+// systemwide, so it always returns -1.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
new file mode 100644
index 000000000..9e0e97bd6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package fileutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/sirupsen/logrus"
+)
+
+// GetTotalUsedFds returns the number of used file descriptors by
+// reading them from the /proc filesystem.
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go
new file mode 100644
index 000000000..5ec21cace
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used file descriptors. Not supported
+// on Windows, so it always returns -1.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
new file mode 100644
index 000000000..e6094b55b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
@@ -0,0 +1,88 @@
+// +build linux
+
+package fsutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func locateDummyIfEmpty(path string) (string, error) {
+ children, err := ioutil.ReadDir(path)
+ if err != nil {
+ return "", err
+ }
+ if len(children) != 0 {
+ return "", nil
+ }
+ dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
+ if err != nil {
+ return "", err
+ }
+ name := dummyFile.Name()
+ err = dummyFile.Close()
+ return name, err
+}
+
+// SupportsDType returns whether the filesystem mounted on path supports d_type
+func SupportsDType(path string) (bool, error) {
+ // locate dummy so that we have at least one dirent
+ dummy, err := locateDummyIfEmpty(path)
+ if err != nil {
+ return false, err
+ }
+ if dummy != "" {
+ defer os.Remove(dummy)
+ }
+
+ visited := 0
+ supportsDType := true
+ fn := func(ent *unix.Dirent) bool {
+ visited++
+ if ent.Type == unix.DT_UNKNOWN {
+ supportsDType = false
+ // stop iteration
+ return true
+ }
+ // continue iteration
+ return false
+ }
+ if err = iterateReadDir(path, fn); err != nil {
+ return false, err
+ }
+ if visited == 0 {
+		return false, fmt.Errorf("did not hit any dirent during iteration of %s", path)
+ }
+ return supportsDType, nil
+}
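+
+// Example (editor's sketch; "/var/lib/mystore" is a hypothetical path): callers
+// typically use this to decide whether a directory can back a storage driver
+// that requires d_type support:
+//
+//	if ok, err := SupportsDType("/var/lib/mystore"); err == nil && !ok {
+//		// warn or fall back: the filesystem was created without d_type support
+//	}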
+
+func iterateReadDir(path string, fn func(*unix.Dirent) bool) error {
+ d, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer d.Close()
+ fd := int(d.Fd())
+ buf := make([]byte, 4096)
+ for {
+ nbytes, err := unix.ReadDirent(fd, buf)
+ if err != nil {
+ return err
+ }
+ if nbytes == 0 {
+ break
+ }
+ for off := 0; off < nbytes; {
+ ent := (*unix.Dirent)(unsafe.Pointer(&buf[off]))
+ if stop := fn(ent); stop {
+ return nil
+ }
+ off += int(ent.Reclen)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
new file mode 100644
index 000000000..c001fbecb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
@@ -0,0 +1,23 @@
+// +build linux
+
+package homedir
+
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/idtools"
+)
+
+// GetStatic returns the home directory for the current user without calling
+// os/user.Current(). This is useful for statically linked binaries on
+// glibc-based systems, because a call to os/user.Current() in a static binary
+// leads to a segfault due to a glibc issue that won't be fixed in the short
+// term. (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
+func GetStatic() (string, error) {
+ uid := os.Getuid()
+ usr, err := idtools.LookupUID(uid)
+ if err != nil {
+ return "", err
+ }
+ return usr.Home, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
new file mode 100644
index 000000000..6b96b856f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package homedir
+
+import (
+ "errors"
+)
+
+// GetStatic is not needed for non-linux systems.
+// (Precisely, it is needed only for glibc-based linux systems.)
+func GetStatic() (string, error) {
+ return "", errors.New("homedir.GetStatic() is not supported on this system")
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
new file mode 100644
index 000000000..f2a20ea8f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package homedir
+
+import (
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+ return "HOME"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ home := os.Getenv(Key())
+ if home == "" {
+ if u, err := user.CurrentUser(); err == nil {
+ return u.Home
+ }
+ }
+ return home
+}
+
+// GetShortcutString returns the string that is the shortcut to the user's
+// home directory in the native shell of the platform.
+func GetShortcutString() string {
+ return "~"
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
new file mode 100644
index 000000000..fafdb2bbf
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
@@ -0,0 +1,24 @@
+package homedir
+
+import (
+ "os"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+ return "USERPROFILE"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ return os.Getenv(Key())
+}
+
+// GetShortcutString returns the string that is the shortcut to the user's
+// home directory in the native shell of the platform.
+func GetShortcutString() string {
+ return "%USERPROFILE%" // be careful while using in format functions
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
new file mode 100644
index 000000000..68a072db2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -0,0 +1,279 @@
+package idtools
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+type subIDRange struct {
+ Start int
+ Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int { return len(e) }
+func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any missing parents along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+// Deprecated: Use MkdirAllAndChown
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+// Deprecated: Use MkdirAndChown with an IDPair
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// MkdirAllAndChown creates a directory (including any missing parents along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
+}
+
+// MkdirAllAndChownNew creates a directory (including any missing parents along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership will be performed
+func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ uid, err := toHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ return uid, gid, nil
+}
+
+// toContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id #
+func toHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
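+
+// Worked example (editor's illustration): with the single mapping
+// {ContainerID: 0, HostID: 100000, Size: 65536}, toHost(1000, m) returns
+// 100000 + (1000 - 0) = 101000, and toContainer(101000, m) returns 1000.
+// IDs outside the range (e.g. container ID 70000) produce an error.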
+
+// IDPair is a UID and GID pair
+type IDPair struct {
+ UID int
+ GID int
+}
+
+// IDMappings contains the mappings of UIDs and GIDs.
+type IDMappings struct {
+ uids []IDMap
+ gids []IDMap
+}
+
+// NewIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIDMappings(username, groupname string) (*IDMappings, error) {
+ subuidRanges, err := parseSubuid(username)
+ if err != nil {
+ return nil, err
+ }
+ subgidRanges, err := parseSubgid(groupname)
+ if err != nil {
+ return nil, err
+ }
+ if len(subuidRanges) == 0 {
+ return nil, fmt.Errorf("No subuid ranges found for user %q", username)
+ }
+ if len(subgidRanges) == 0 {
+ return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+ }
+
+ return &IDMappings{
+ uids: createIDMap(subuidRanges),
+ gids: createIDMap(subgidRanges),
+ }, nil
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
+ return &IDMappings{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IDMappings) RootPair() IDPair {
+ uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+ return IDPair{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+ var err error
+ target := i.RootPair()
+
+ if pair.UID != target.UID {
+ target.UID, err = toHost(pair.UID, i.uids)
+ if err != nil {
+ return target, err
+ }
+ }
+
+ if pair.GID != target.GID {
+ target.GID, err = toHost(pair.GID, i.gids)
+ }
+ return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
+ uid, err := toContainer(pair.UID, i.uids)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toContainer(pair.GID, i.gids)
+ return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IDMappings) Empty() bool {
+ return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs returns the UID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) UIDs() []IDMap {
+ return i.uids
+}
+
+// GIDs returns the GID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) GIDs() []IDMap {
+ return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+ idMap := []IDMap{}
+
+ // sort the ranges by lowest ID first
+ sort.Sort(subidRanges)
+ containerID := 0
+ for _, idrange := range subidRanges {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: idrange.Start,
+ Size: idrange.Length,
+ })
+ containerID = containerID + idrange.Length
+ }
+ return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+ var rangeList ranges
+
+ subidFile, err := os.Open(path)
+ if err != nil {
+ return rangeList, err
+ }
+ defer subidFile.Close()
+
+ s := bufio.NewScanner(subidFile)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return rangeList, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" || strings.HasPrefix(text, "#") {
+ continue
+ }
+ parts := strings.Split(text, ":")
+ if len(parts) != 3 {
+ return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ }
+ if parts[0] == username || username == "ALL" {
+ startid, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ length, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ rangeList = append(rangeList, subIDRange{startid, length})
+ }
+ }
+ return rangeList, nil
+}
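+
+// Example (editor's illustration): given an /etc/subuid line of the form
+//
+//	alice:100000:65536
+//
+// parseSubuid("alice") returns ranges{{Start: 100000, Length: 65536}}.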
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
new file mode 100644
index 000000000..b5870506a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -0,0 +1,204 @@
+// +build !windows
+
+package idtools
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/containers/storage/pkg/system"
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+var (
+ entOnce sync.Once
+ getentCmd string
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If chownExisting is false, we won't
+ // chown the full directory path if it exists
+ var paths []string
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ paths = []string{path}
+ } else if err == nil && chownExisting {
+ // short-circuit--we were called with an existing directory and chown was requested
+ return os.Chown(path, ownerUID, ownerGID)
+ } else if err == nil {
+ // nothing to do; directory path fully exists already and chown was NOT requested
+ return nil
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ return err
+ }
+ } else {
+ if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
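+
+// Worked example (editor's illustration): if only /var exists and
+// mkdirAs("/var/lib/foo", 0755, 1000, 1000, true, false) is called, both
+// lib and foo are created and chowned to 1000:1000; a second call with
+// chownExisting == false is then a no-op, while chownExisting == true would
+// re-chown the existing /var/lib/foo.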
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+func CanAccess(path string, pair IDPair) bool {
+ statInfo, err := system.Stat(path)
+ if err != nil {
+ return false
+ }
+ fileMode := os.FileMode(statInfo.Mode())
+ permBits := fileMode.Perm()
+ return accessible(statInfo.UID() == uint32(pair.UID),
+ statInfo.GID() == uint32(pair.GID), permBits)
+}
+
+func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
+ if isOwner && (perms&0100 == 0100) {
+ return true
+ }
+ if isGroup && (perms&0010 == 0010) {
+ return true
+ }
+ if perms&0001 == 0001 {
+ return true
+ }
+ return false
+}
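+
+// Worked example (editor's illustration): for a directory with mode 0750,
+// accessible(true, false, 0750) and accessible(false, true, 0750) are true
+// (owner and group execute bits are set), while accessible(false, false, 0750)
+// is false because the world execute bit is unset.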
+
+// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUser(username string) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUser(username)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
+ if err != nil {
+ return user.User{}, err
+ }
+ return usr, nil
+}
+
+// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUID(uid int) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUid(uid)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
+}
+
+func getentUser(args string) (user.User, error) {
+ reader, err := callGetent(args)
+ if err != nil {
+ return user.User{}, err
+ }
+ users, err := user.ParsePasswd(reader)
+ if err != nil {
+ return user.User{}, err
+ }
+ if len(users) == 0 {
+ return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
+ }
+ return users[0], nil
+}
+
+// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGroup(groupname string) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGroup(groupname)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
+}
+
+// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGID(gid int) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGid(gid)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(fmt.Sprintf("%s %d", "group", gid))
+}
+
+func getentGroup(args string) (user.Group, error) {
+ reader, err := callGetent(args)
+ if err != nil {
+ return user.Group{}, err
+ }
+ groups, err := user.ParseGroup(reader)
+ if err != nil {
+ return user.Group{}, err
+ }
+ if len(groups) == 0 {
+ return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
+ }
+ return groups[0], nil
+}
+
+func callGetent(args string) (io.Reader, error) {
+ entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
+ // if no `getent` command on host, can't do anything else
+ if getentCmd == "" {
+		return nil, fmt.Errorf("unable to find getent command")
+ }
+ out, err := execCmd(getentCmd, args)
+ if err != nil {
+ exitCode, errC := system.GetExitCode(err)
+ if errC != nil {
+ return nil, err
+ }
+ switch exitCode {
+ case 1:
+ return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
+ case 2:
+ terms := strings.Split(args, " ")
+ return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
+ case 3:
+ return nil, fmt.Errorf("getent database doesn't support enumeration")
+ default:
+ return nil, err
+ }
+
+ }
+ return bytes.NewReader(out), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
new file mode 100644
index 000000000..dbf6bc4c9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package idtools
+
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/system"
+)
+
+// Platforms such as Windows do not support the UID/GID concept. So make this
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+// Windows does not require/support this function, so always return true
+func CanAccess(path string, pair IDPair) bool {
+ return true
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 000000000..9da7975e2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,164 @@
+package idtools
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
+// useradd -r -s /bin/false <username>
+
+var (
+ once sync.Once
+ userCommand string
+
+ cmdTemplates = map[string]string{
+ "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+ "useradd": "-r -s /bin/false %s",
+ "usermod": "-%s %d-%d %s",
+ }
+
+ idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+ // default length for a UID/GID subordinate range
+ defaultRangeLen = 65536
+ defaultRangeStart = 100000
+ userMod = "usermod"
+)
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ if err := addUser(name); err != nil {
+ return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ }
+
+ // Query the system for the created uid and gid pair
+ out, err := execCmd("id", name)
+ if err != nil {
+ return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ }
+ matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+ if len(matches) != 3 {
+ return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ }
+ uid, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ }
+ gid, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ }
+
+ // Now we need to create the subuid/subgid ranges for our new user/group (system users
+ // do not get auto-created ranges in subuid/subgid)
+
+ if err := createSubordinateRanges(name); err != nil {
+ return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ }
+ return uid, gid, nil
+}
+
+func addUser(userName string) error {
+ once.Do(func() {
+ // set up which commands are used for adding users/groups dependent on distro
+ if _, err := resolveBinary("adduser"); err == nil {
+ userCommand = "adduser"
+ } else if _, err := resolveBinary("useradd"); err == nil {
+ userCommand = "useradd"
+ }
+ })
+ if userCommand == "" {
+ return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ }
+ args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+ out, err := execCmd(userCommand, args)
+ if err != nil {
+ return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ }
+ return nil
+}
+
+func createSubordinateRanges(name string) error {
+ // first, we should verify that ranges weren't automatically created
+ // by the distro tooling
+ ranges, err := parseSubuid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no UID ranges; let's create one
+ startID, err := findNextUIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subuid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+
+ ranges, err = parseSubgid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no GID ranges; let's create one
+ startID, err := findNextGIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subgid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+ return nil
+}
+
+func findNextUIDRange() (int, error) {
+ ranges, err := parseSubuid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+ ranges, err := parseSubgid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+ startID := defaultRangeStart
+ for _, arange := range rangeList {
+ if wouldOverlap(arange, startID) {
+ startID = arange.Start + arange.Length
+ }
+ }
+ return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+ low := ID
+ high := ID + defaultRangeLen
+ if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+ (high <= arange.Start+arange.Length && high >= arange.Start) {
+ return true
+ }
+ return false
+}
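+
+// Worked example (editor's illustration): with existing ranges
+// {Start: 100000, Length: 65536} and {Start: 165536, Length: 65536},
+// findNextRangeStart walks the sorted list and returns 231072, the first
+// candidate start whose 65536-ID span overlaps neither range.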
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 000000000..d98b354cb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools
+
+import "fmt"
+
+// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
+// and calls the appropriate helper function to add the group and then
+// the user to the group in /etc/group and /etc/passwd respectively.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
new file mode 100644
index 000000000..9703ecbd9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package idtools
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+func resolveBinary(binname string) (string, error) {
+ binaryPath, err := exec.LookPath(binname)
+ if err != nil {
+ return "", err
+ }
+ resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+ if err != nil {
+ return "", err
+ }
+	// only return no error if the final resolved binary basename
+	// matches what was searched for
+ if filepath.Base(resolvedPath) == binname {
+ return resolvedPath, nil
+ }
+ return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+ execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+ return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go
new file mode 100644
index 000000000..3d737b3e1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+ buf []byte
+ pos int
+ lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+ n := copy(b.buf[b.pos:cap(b.buf)], p)
+ b.pos += n
+
+ if n < len(p) {
+ if b.pos == cap(b.buf) {
+ return n, errBufferFull
+ }
+ return n, io.ErrShortWrite
+ }
+ return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+ n := copy(p, b.buf[b.lastRead:b.pos])
+ b.lastRead += n
+ return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+ return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+ return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+ b.pos = 0
+ b.lastRead = 0
+ b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+ return string(b.buf[b.lastRead:b.pos])
+}
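+
+// Example (editor's illustration): a fixedBuffer created with capacity 4
+// reports n == 4 and err == errBufferFull from Write([]byte("abcdef"));
+// the caller (BytesPipe, in the next file) reacts by allocating a larger
+// buffer for the remaining bytes.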
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
new file mode 100644
index 000000000..72a04f349
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+ // ErrClosed is returned when Write is called on a closed BytesPipe.
+ ErrClosed = errors.New("write to closed BytesPipe")
+
+ bufPools = make(map[int]*sync.Pool)
+ bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't stay overgrown after peak loads.
+type BytesPipe struct {
+ mu sync.Mutex
+ wait *sync.Cond
+ buf []*fixedBuffer
+ bufLen int
+ closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe, initialized with a single empty
+// fixed buffer of capacity minCap (64 bytes); further buffers are allocated
+// on demand as data is written.
+func NewBytesPipe() *BytesPipe {
+ bp := &BytesPipe{}
+ bp.buf = append(bp.buf, getBuffer(minCap))
+ bp.wait = sync.NewCond(&bp.mu)
+ return bp
+}
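+
+// Example (editor's sketch): a minimal producer/consumer round trip:
+//
+//	bp := NewBytesPipe()
+//	go func() {
+//		bp.Write([]byte("hello"))
+//		bp.Close()
+//	}()
+//	buf := make([]byte, 16)
+//	n, _ := bp.Read(buf) // n == 5, string(buf[:n]) == "hello"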
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+ bp.mu.Lock()
+
+ written := 0
+loop0:
+ for {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return written, ErrClosed
+ }
+
+ if len(bp.buf) == 0 {
+			bp.buf = append(bp.buf, getBuffer(minCap))
+ }
+ // get the last buffer
+ b := bp.buf[len(bp.buf)-1]
+
+ n, err := b.Write(p)
+ written += n
+ bp.bufLen += n
+
+ // errBufferFull is an error we expect to get if the buffer is full
+ if err != nil && err != errBufferFull {
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, err
+ }
+
+ // if there was enough room to write all then break
+ if len(p) == n {
+ break
+ }
+
+ // more data: write to the next slice
+ p = p[n:]
+
+ // make sure the buffer doesn't grow too big from this write
+ for bp.bufLen >= blockThreshold {
+ bp.wait.Wait()
+ if bp.closeErr != nil {
+ continue loop0
+ }
+ }
+
+ // add new byte slice to the buffers slice and continue writing
+ nextCap := b.Cap() * 2
+ if nextCap > maxCap {
+ nextCap = maxCap
+ }
+ bp.buf = append(bp.buf, getBuffer(nextCap))
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+ bp.mu.Lock()
+ if err != nil {
+ bp.closeErr = err
+ } else {
+ bp.closeErr = io.EOF
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+ return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data could be read only once.
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+ bp.mu.Lock()
+ if bp.bufLen == 0 {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return 0, bp.closeErr
+ }
+ bp.wait.Wait()
+ if bp.bufLen == 0 && bp.closeErr != nil {
+ err := bp.closeErr
+ bp.mu.Unlock()
+ return 0, err
+ }
+ }
+
+ for bp.bufLen > 0 {
+ b := bp.buf[0]
+ read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+ n += read
+ bp.bufLen -= read
+
+ if b.Len() == 0 {
+ // it's empty so return it to the pool and move to the next one
+ returnBuffer(b)
+ bp.buf[0] = nil
+ bp.buf = bp.buf[1:]
+ }
+
+ if len(p) == read {
+ break
+ }
+
+ p = p[read:]
+ }
+
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return
+}
+
+func returnBuffer(b *fixedBuffer) {
+ b.Reset()
+ bufPoolsLock.Lock()
+ pool := bufPools[b.Cap()]
+ bufPoolsLock.Unlock()
+ if pool != nil {
+ pool.Put(b)
+ }
+}
+
+func getBuffer(size int) *fixedBuffer {
+ bufPoolsLock.Lock()
+ pool, ok := bufPools[size]
+ if !ok {
+ pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+ bufPools[size] = pool
+ }
+ bufPoolsLock.Unlock()
+ return pool.Get().(*fixedBuffer)
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
new file mode 100644
index 000000000..a56c46265
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
@@ -0,0 +1,162 @@
+package ioutils
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
+// temporary file, and closing it atomically renames the temporary file to the
+// destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ if err != nil {
+ return nil, err
+ }
+
+ abspath, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ return &atomicFileWriter{
+ f: f,
+ fn: abspath,
+ perm: perm,
+ }, nil
+}
+
+// AtomicWriteFile atomically writes data to a file named by filename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := NewAtomicFileWriter(filename, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ f.(*atomicFileWriter).writeErr = err
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
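+
+// Example (editor's illustration; the path is hypothetical):
+//
+//	if err := AtomicWriteFile("/etc/myapp/config.json", data, 0600); err != nil {
+//		return err
+//	}
+//
+// Concurrent readers observe either the old contents or the new, never a
+// partially written file.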
+
+type atomicFileWriter struct {
+ f *os.File
+ fn string
+ writeErr error
+ perm os.FileMode
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+ n, err := w.f.Write(dt)
+ if err != nil {
+ w.writeErr = err
+ }
+ return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+ defer func() {
+ if retErr != nil || w.writeErr != nil {
+ os.Remove(w.f.Name())
+ }
+ }()
+ if err := w.f.Sync(); err != nil {
+ w.f.Close()
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+ return err
+ }
+ if w.writeErr == nil {
+ return os.Rename(w.f.Name(), w.fn)
+ }
+ return nil
+}
+
+// AtomicWriteSet is used to atomically write a set
+// of files and ensure they are visible at the same time.
+// Must be committed to a new directory.
+type AtomicWriteSet struct {
+ root string
+}
+
+// NewAtomicWriteSet creates a new atomic write set to
+// atomically create a set of files. The given directory
+// is used as the base directory for storing files before
+// commit. If no temporary directory is given the system
+// default is used.
+func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
+ td, err := ioutil.TempDir(tmpDir, "write-set-")
+ if err != nil {
+ return nil, err
+ }
+
+ return &AtomicWriteSet{
+ root: td,
+ }, nil
+}
+
+// WriteFile writes a file to the set, guaranteeing the file
+// has been synced.
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type syncFileCloser struct {
+ *os.File
+}
+
+func (w syncFileCloser) Close() error {
+ err := w.File.Sync()
+ if err1 := w.File.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// FileWriter opens a file writer inside the set. The file
+// should be synced and closed before calling commit.
+func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return syncFileCloser{f}, nil
+}
+
+// Cancel cancels the set and removes all temporary data
+// created in the set.
+func (ws *AtomicWriteSet) Cancel() error {
+ return os.RemoveAll(ws.root)
+}
+
+// Commit moves all created files to the target directory. The
+// target directory must not exist and the parent of the target
+// directory must exist.
+func (ws *AtomicWriteSet) Commit(target string) error {
+ return os.Rename(ws.root, target)
+}
+
+// String returns the location the set is writing to.
+func (ws *AtomicWriteSet) String() string {
+ return ws.root
+}
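+
+// Example (editor's sketch; paths and data are hypothetical): staging two
+// files and publishing them in one atomic rename:
+//
+//	ws, err := NewAtomicWriteSet("")
+//	if err != nil {
+//		return err
+//	}
+//	ws.WriteFile("manifest.json", manifest, 0644)
+//	ws.WriteFile("config.json", config, 0644)
+//	if err := ws.Commit("/srv/bundles/v2"); err != nil {
+//		ws.Cancel()
+//	}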
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
new file mode 100644
index 000000000..63f3c07f4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function;
+// the function will run at end-of-file or when the reader is closed.
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+// Close closes the file and runs the function.
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+ cancel func()
+ pR *io.PipeReader // Stream to read from
+ pW *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+ pR, pW := io.Pipe()
+
+ // Create a context used to signal when the pipe is closed
+ doneCtx, cancel := context.WithCancel(context.Background())
+
+ p := &cancelReadCloser{
+ cancel: cancel,
+ pR: pR,
+ pW: pW,
+ }
+
+ go func() {
+ _, err := io.Copy(pW, in)
+ select {
+ case <-ctx.Done():
+ // If the context was closed, p.closeWithError
+ // was already called. Calling it again would
+ // change the error that Read returns.
+ default:
+ p.closeWithError(err)
+ }
+ in.Close()
+ }()
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ p.closeWithError(ctx.Err())
+ case <-doneCtx.Done():
+ return
+ }
+ }
+ }()
+
+ return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+ return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+ p.pW.CloseWithError(err)
+ p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+ p.closeWithError(io.EOF)
+ return nil
+}
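+
+// Example (editor's sketch; resp.Body stands in for any slow io.ReadCloser):
+// bounding a read with a timeout:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	rc := NewCancelReadCloser(ctx, resp.Body)
+//	defer rc.Close()
+//	data, err := ioutil.ReadAll(rc) // err is context.DeadlineExceeded on timeout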
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
new file mode 100644
index 000000000..1539ad21b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+ return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
new file mode 100644
index 000000000..c719c120b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+ "io/ioutil"
+
+ "github.com/containers/storage/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+ tempDir, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
new file mode 100644
index 000000000..52a4901ad
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+ "io"
+ "sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Write and Flush calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+ w io.Writer
+ flusher flusher
+ flushed chan struct{}
+ flushedOnce sync.Once
+ closed chan struct{}
+ closeLock sync.Mutex
+}
+
+type flusher interface {
+ Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ select {
+ case <-wf.closed:
+ return 0, errWriteFlusherClosed
+ default:
+ }
+
+ n, err = wf.w.Write(b)
+ wf.Flush() // every write is a flush.
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ select {
+ case <-wf.closed:
+ return
+ default:
+ }
+
+ wf.flushedOnce.Do(func() {
+ close(wf.flushed)
+ })
+ wf.flusher.Flush()
+}
+
+// Flushed reports whether the stream has been flushed at least once.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued. Another
+	// hook should be used instead.
+ var flushed bool
+ select {
+ case <-wf.flushed:
+ flushed = true
+ default:
+ }
+ return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+ wf.closeLock.Lock()
+ defer wf.closeLock.Unlock()
+
+ select {
+ case <-wf.closed:
+ return errWriteFlusherClosed
+ default:
+ close(wf.closed)
+ }
+ return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var fl flusher
+ if f, ok := w.(flusher); ok {
+ fl = f
+ } else {
+ fl = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
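+
+// Example (editor's sketch): an http.ResponseWriter usually implements the
+// flusher interface via http.Flusher, so progress lines reach the client as
+// they are written:
+//
+//	wf := NewWriteFlusher(w) // w is an http.ResponseWriter
+//	defer wf.Close()
+//	fmt.Fprintln(wf, "pulling layer 1/3...")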
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
new file mode 100644
index 000000000..ccc7f9c23
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a no-op.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
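+
+// Example (editor's illustration): counting the bytes produced by
+// json.Encoder, whose Encode method reports no byte count of its own:
+//
+//	wc := NewWriteCounter(dst)
+//	if err := json.NewEncoder(wc).Encode(v); err != nil {
+//		return err
+//	}
+//	fmt.Printf("encoded %d bytes\n", wc.Count)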
diff --git a/vendor/github.com/containers/storage/pkg/locker/README.md b/vendor/github.com/containers/storage/pkg/locker/README.md
new file mode 100644
index 000000000..ad15e89af
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/locker/README.md
@@ -0,0 +1,65 @@
+Locker
+======
+
+locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks similar to a sync.Mutex; however, the user must provide
+a reference to refer to the underlying lock when locking and unlocking, and
+Unlock may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+
+
+## Usage
+
+```go
+package important
+
+import (
+ "sync"
+ "time"
+
+ "github.com/containers/storage/pkg/locker"
+)
+
+type important struct {
+ locks *locker.Locker
+ data map[string]interface{}
+ mu sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+ i.locks.Lock(name)
+ defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+ i.locks.Lock(name)
+ defer i.locks.Unlock(name)
+
+ i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+ time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures that
+any other function dealing with the same name will block.
+
+When you need to modify the underlying data, use the global lock to ensure nothing
+else is modifying it at the same time.
+Since the name lock is already in place, no reads will occur while the modification
+is being performed.
+
diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/containers/storage/pkg/locker/locker.go
new file mode 100644
index 000000000..0b22ddfab
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks similar to a sync.Mutex; however, the user must provide
+a reference to refer to the underlying lock when locking and unlocking, and
+Unlock may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+*/
+package locker
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+)
+
+// ErrNoSuchLock is returned when the requested lock does not exist
+var ErrNoSuchLock = errors.New("no such lock")
+
+// Locker provides a locking mechanism based on the passed in reference name
+type Locker struct {
+ mu sync.Mutex
+ locks map[string]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given name.
+type lockCtr struct {
+ mu sync.Mutex
+ // waiters is the number of waiters waiting to acquire the lock
+ // this is int32 instead of uint32 so we can add `-1` in `dec()`
+ waiters int32
+}
+
+// inc increments the number of waiters waiting for the lock
+func (l *lockCtr) inc() {
+ atomic.AddInt32(&l.waiters, 1)
+}
+
+// dec decrements the number of waiters waiting on the lock
+func (l *lockCtr) dec() {
+ atomic.AddInt32(&l.waiters, -1)
+}
+
+// count gets the current number of waiters
+func (l *lockCtr) count() int32 {
+ return atomic.LoadInt32(&l.waiters)
+}
+
+// Lock locks the mutex
+func (l *lockCtr) Lock() {
+ l.mu.Lock()
+}
+
+// Unlock unlocks the mutex
+func (l *lockCtr) Unlock() {
+ l.mu.Unlock()
+}
+
+// New creates a new Locker
+func New() *Locker {
+ return &Locker{
+ locks: make(map[string]*lockCtr),
+ }
+}
+
+// Lock locks a mutex with the given name. If it doesn't exist, one is created
+func (l *Locker) Lock(name string) {
+ l.mu.Lock()
+ if l.locks == nil {
+ l.locks = make(map[string]*lockCtr)
+ }
+
+ nameLock, exists := l.locks[name]
+ if !exists {
+ nameLock = &lockCtr{}
+ l.locks[name] = nameLock
+ }
+
+ // increment the nameLock waiters while inside the main mutex
+ // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently
+ nameLock.inc()
+ l.mu.Unlock()
+
+ // Lock the nameLock outside the main mutex so we don't block other operations
+ // once locked then we can decrement the number of waiters for this lock
+ nameLock.Lock()
+ nameLock.dec()
+}
+
+// Unlock unlocks the mutex with the given name
+// If the given lock is not being waited on by any other callers, it is deleted
+func (l *Locker) Unlock(name string) error {
+ l.mu.Lock()
+ nameLock, exists := l.locks[name]
+ if !exists {
+ l.mu.Unlock()
+ return ErrNoSuchLock
+ }
+
+ if nameLock.count() == 0 {
+ delete(l.locks, name)
+ }
+ nameLock.Unlock()
+
+ l.mu.Unlock()
+ return nil
+}
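+
+// Example (editor's illustration; the key is arbitrary):
+//
+//	l := New()
+//	l.Lock("layer-sha256-abc")
+//	defer l.Unlock("layer-sha256-abc")
+//	// ... work scoped to this one key; other keys remain unblocked ...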
diff --git a/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/vendor/github.com/containers/storage/pkg/longpath/longpath.go
new file mode 100644
index 000000000..9b15bfff4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// Package longpath introduces some constants and helper functions for handling
+// long paths in Windows, which are expected to be prepended with `\\?\` and
+// followed by either a drive letter, a UNC server\share, or a volume identifier.
+
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
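A small sketch of how AddPrefix behaves on the two path shapes it handles (the
paths are made up): drive-letter paths get the prefix prepended verbatim, while
UNC paths trade one leading backslash for the `UNC` marker.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/longpath"
)

func main() {
	// Drive-letter path: the prefix is prepended directly.
	fmt.Println(longpath.AddPrefix(`C:\some\deep\path`)) // \\?\C:\some\deep\path

	// UNC path: the first backslash is replaced by the UNC marker.
	fmt.Println(longpath.AddPrefix(`\\server\share\file`)) // \\?\UNC\server\share\file
}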
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
new file mode 100644
index 000000000..34c80548d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -0,0 +1,157 @@
+// +build linux
+
+package loopback
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+)
+
+// Loopback related errors
+var (
+ ErrAttachLoopbackDevice = errors.New("loopback attach failed")
+ ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
+ ErrSetCapacity = errors.New("Unable to set loopback capacity")
+)
+
+func stringToLoopName(src string) [LoNameSize]uint8 {
+ var dst [LoNameSize]uint8
+ copy(dst[:], src[:])
+ return dst
+}
+
+func getNextFreeLoopbackIndex() (int, error) {
+ f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+
+ index, err := ioctlLoopCtlGetFree(f.Fd())
+ if index < 0 {
+ index = 0
+ }
+ return index, err
+}
+
+func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) {
+ // Read information about the loopback file.
+ var st syscall.Stat_t
+ err = syscall.Fstat(int(sparseFile.Fd()), &st)
+ if err != nil {
+ logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err)
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ // Start looking for a free /dev/loop
+ for {
+ target := fmt.Sprintf("/dev/loop%d", index)
+ index++
+
+ fi, err := os.Stat(target)
+ if err != nil {
+ if os.IsNotExist(err) {
+ logrus.Error("There are no more loopback devices available.")
+ }
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ if fi.Mode()&os.ModeDevice != os.ModeDevice {
+ logrus.Errorf("Loopback device %s is not a block device.", target)
+ continue
+ }
+
+ // OpenFile adds O_CLOEXEC
+ loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
+ if err != nil {
+ logrus.Errorf("Error opening loopback device: %s", err)
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ // Try to attach to the loop file
+ if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
+ loopFile.Close()
+
+ // If the error is EBUSY, then try the next loopback
+ if err != syscall.EBUSY {
+ logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ // Otherwise, we keep going with the loop
+ continue
+ }
+
+ // Check if the loopback driver and underlying filesystem agree on the loopback file's
+ // device and inode numbers.
+ dev, ino, err := getLoopbackBackingFile(loopFile)
+ if err != nil {
+ logrus.Errorf("Error getting loopback backing file: %s", err)
+ return nil, ErrGetLoopbackBackingFile
+ }
+ if dev != st.Dev || ino != st.Ino {
+ logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
+ }
+
+ // In case of success, we finished. Break the loop.
+ break
+ }
+
+ // This can't happen, but let's be sure
+ if loopFile == nil {
+ logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ return loopFile, nil
+}
+
+// AttachLoopDevice attaches the given sparse file to the next
+// available loopback device. It returns an opened *os.File.
+func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
+
+ // Try to retrieve the next available loopback device via syscall.
+ // If it fails, we discard the error and start looping for a
+ // loopback from index 0.
+ startIndex, err := getNextFreeLoopbackIndex()
+ if err != nil {
+ logrus.Debugf("Error retrieving the next available loopback: %s", err)
+ }
+
+ // OpenFile adds O_CLOEXEC
+ sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+ if err != nil {
+ logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+ return nil, ErrAttachLoopbackDevice
+ }
+ defer sparseFile.Close()
+
+ loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the status of the loopback device
+ loopInfo := &loopInfo64{
+ loFileName: stringToLoopName(loopFile.Name()),
+ loOffset: 0,
+ loFlags: LoFlagsAutoClear,
+ }
+
+ if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+ logrus.Errorf("Cannot set up loopback device info: %s", err)
+
+ // If the call failed, then free the loopback device
+ if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+ logrus.Error("Error while cleaning up the loopback device")
+ }
+ loopFile.Close()
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ return loopFile, nil
+}
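A hedged usage sketch for AttachLoopDevice (the image path is hypothetical, and
attaching loop devices requires root on a Linux host):

// +build linux

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	loopFile, err := loopback.AttachLoopDevice("/var/lib/example/disk.img")
	if err != nil {
		fmt.Println("attach failed:", err)
		return
	}
	// LoFlagsAutoClear was set above, so the kernel detaches the device
	// once the last reference to it is closed.
	defer loopFile.Close()
	fmt.Println("attached as", loopFile.Name()) // e.g. /dev/loop0
}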
diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
new file mode 100644
index 000000000..0714eb5f8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
@@ -0,0 +1,53 @@
+// +build linux
+
+package loopback
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+ index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
+ loopInfo := &loopInfo64{}
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+ return nil, err
+ }
+ return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+ return err
+ }
+ return nil
+}
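One detail worth calling out in the wrappers above: syscall.Syscall returns a
syscall.Errno, an integer type where zero means success, which is why they test
`err != 0` rather than `err != nil`. A standalone sketch of the same pattern
(the request number and fd are placeholders):

package main

import (
	"fmt"
	"syscall"
)

func doIoctl(fd, request, arg uintptr) error {
	if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, request, arg); errno != 0 {
		return errno // syscall.Errno implements the error interface
	}
	return nil
}

func main() {
	// fd 0 and request 0 are placeholders; real callers pass f.Fd() and an
	// ioctl request constant such as LoopClrFd.
	if err := doIoctl(0, 0, 0); err != nil {
		fmt.Println("ioctl failed:", err)
	}
}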
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
new file mode 100644
index 000000000..e1100ce15
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
@@ -0,0 +1,52 @@
+// +build linux
+
+package loopback
+
+/*
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+
+#ifndef LOOP_CTL_GET_FREE
+ #define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LO_FLAGS_PARTSCAN
+ #define LO_FLAGS_PARTSCAN 8
+#endif
+
+*/
+import "C"
+
+type loopInfo64 struct {
+ loDevice uint64 /* ioctl r/o */
+ loInode uint64 /* ioctl r/o */
+ loRdevice uint64 /* ioctl r/o */
+ loOffset uint64
+ loSizelimit uint64 /* bytes, 0 == max available */
+ loNumber uint32 /* ioctl r/o */
+ loEncryptType uint32
+ loEncryptKeySize uint32 /* ioctl w/o */
+ loFlags uint32 /* ioctl r/o */
+ loFileName [LoNameSize]uint8
+ loCryptName [LoNameSize]uint8
+ loEncryptKey [LoKeySize]uint8 /* ioctl w/o */
+ loInit [2]uint64
+}
+
+// IOCTL consts
+const (
+ LoopSetFd = C.LOOP_SET_FD
+ LoopCtlGetFree = C.LOOP_CTL_GET_FREE
+ LoopGetStatus64 = C.LOOP_GET_STATUS64
+ LoopSetStatus64 = C.LOOP_SET_STATUS64
+ LoopClrFd = C.LOOP_CLR_FD
+ LoopSetCapacity = C.LOOP_SET_CAPACITY
+)
+
+// LOOP consts.
+const (
+ LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
+ LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
+ LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
+ LoKeySize = C.LO_KEY_SIZE
+ LoNameSize = C.LO_NAME_SIZE
+)
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
new file mode 100644
index 000000000..a8ec3c616
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
@@ -0,0 +1,63 @@
+// +build linux
+
+package loopback
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/sirupsen/logrus"
+)
+
+func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
+ loopInfo, err := ioctlLoopGetStatus64(file.Fd())
+ if err != nil {
+ logrus.Errorf("Error get loopback backing file: %s", err)
+ return 0, 0, ErrGetLoopbackBackingFile
+ }
+ return loopInfo.loDevice, loopInfo.loInode, nil
+}
+
+// SetCapacity reloads the size for the loopback device.
+func SetCapacity(file *os.File) error {
+ if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
+ logrus.Errorf("Error loopbackSetCapacity: %s", err)
+ return ErrSetCapacity
+ }
+ return nil
+}
+
+// FindLoopDeviceFor returns a loopback device file for the specified file,
+// which is the backing file of a loopback device.
+func FindLoopDeviceFor(file *os.File) *os.File {
+ stat, err := file.Stat()
+ if err != nil {
+ return nil
+ }
+ targetInode := stat.Sys().(*syscall.Stat_t).Ino
+ targetDevice := stat.Sys().(*syscall.Stat_t).Dev
+
+ for i := 0; true; i++ {
+ path := fmt.Sprintf("/dev/loop%d", i)
+
+ file, err := os.OpenFile(path, os.O_RDWR, 0)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+
+ // Ignore all errors until the first not-exist:
+ // we want to continue looking for the file.
+ continue
+ }
+
+ dev, inode, err := getLoopbackBackingFile(file)
+ if err == nil && dev == targetDevice && inode == targetInode {
+ return file
+ }
+ file.Close()
+ }
+
+ return nil
+}
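A usage sketch for FindLoopDeviceFor (the backing-file path is hypothetical,
and the file must already be attached to some /dev/loopN for a hit):

// +build linux

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	backing, err := os.Open("/var/lib/example/disk.img")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer backing.Close()

	// Scans /dev/loop0, /dev/loop1, ... comparing device and inode numbers.
	if loopFile := loopback.FindLoopDeviceFor(backing); loopFile != nil {
		defer loopFile.Close()
		fmt.Println("backed by", loopFile.Name())
	} else {
		fmt.Println("no loop device found")
	}
}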
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go
new file mode 100644
index 000000000..607dbed43
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags.go
@@ -0,0 +1,149 @@
+package mount
+
+import (
+ "fmt"
+ "strings"
+)
+
+var flags = map[string]struct {
+ clear bool
+ flag int
+}{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "unbindable": {false, UNBINDABLE},
+ "runbindable": {false, RUNBINDABLE},
+ "private": {false, PRIVATE},
+ "rprivate": {false, RPRIVATE},
+ "shared": {false, SHARED},
+ "rshared": {false, RSHARED},
+ "slave": {false, SLAVE},
+ "rslave": {false, RSLAVE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+}
+
+var validFlags = map[string]bool{
+ "": true,
+ "size": true,
+ "mode": true,
+ "uid": true,
+ "gid": true,
+ "nr_inodes": true,
+ "nr_blocks": true,
+ "mpol": true,
+}
+
+var propagationFlags = map[string]bool{
+ "bind": true,
+ "rbind": true,
+ "unbindable": true,
+ "runbindable": true,
+ "private": true,
+ "rprivate": true,
+ "shared": true,
+ "rshared": true,
+ "slave": true,
+ "rslave": true,
+}
+
+// MergeTmpfsOptions merges mount options to make sure there are no duplicates.
+func MergeTmpfsOptions(options []string) ([]string, error) {
+ // We use collisions maps to remove duplicates.
+ // For flag, the key is the flag value (the key for propagation flag is -1)
+ // For data=value, the key is the data
+ flagCollisions := map[int]bool{}
+ dataCollisions := map[string]bool{}
+
+ var newOptions []string
+ // We process in reverse order
+ for i := len(options) - 1; i >= 0; i-- {
+ option := options[i]
+ if option == "defaults" {
+ continue
+ }
+ if f, ok := flags[option]; ok && f.flag != 0 {
+ // There is only one propagation mode
+ key := f.flag
+ if propagationFlags[option] {
+ key = -1
+ }
+ // Check to see if there is collision for flag
+ if !flagCollisions[key] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ flagCollisions[key] = true
+ }
+ continue
+ }
+ opt := strings.SplitN(option, "=", 2)
+ if len(opt) != 2 || !validFlags[opt[0]] {
+ return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ if !dataCollisions[opt[0]] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ dataCollisions[opt[0]] = true
+ }
+ }
+
+ return newOptions, nil
+}
+
+// Parse fstab type mount options into mount() flags
+// and device specific data
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parse fstab type mount options into flags and data
+func ParseTmpfsOptions(options string) (int, string, error) {
+ flags, data := parseOptions(options)
+ for _, o := range strings.Split(data, ",") {
+ opt := strings.SplitN(o, "=", 2)
+ if !validFlags[opt[0]] {
+ return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ }
+ return flags, data, nil
+}
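A short sketch of the two exported entry points above (option strings are
examples only): options recognized in the flags table become mount(2) bits,
anything else is passed through as filesystem data, and merging keeps only the
last setting of each flag.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// "ro" and "noexec" become flag bits; "size=64m" stays as data.
	flags, data, err := mount.ParseTmpfsOptions("ro,noexec,size=64m")
	fmt.Printf("flags=%#x data=%q err=%v\n", flags, data, err)

	// Processing runs in reverse, so the later "rw" wins over "ro".
	merged, _ := mount.MergeTmpfsOptions([]string{"ro", "size=64m", "rw"})
	fmt.Println(merged) // [size=64m rw]
}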
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
new file mode 100644
index 000000000..5f76f331b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
@@ -0,0 +1,49 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ // RDONLY will mount the filesystem as read-only.
+ RDONLY = C.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = C.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = C.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ mntDetach = 0
+)
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go
new file mode 100644
index 000000000..0425d0dd6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go
@@ -0,0 +1,87 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = unix.MS_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = unix.MS_NOSUID
+
+ // NODEV will not interpret character or block special devices on the file
+ // system.
+ NODEV = unix.MS_NODEV
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = unix.MS_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = unix.MS_SYNCHRONOUS
+
+ // DIRSYNC will force all directory updates within the file system to be done
+ // synchronously. This affects the following system calls: create, link,
+ // unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = unix.MS_DIRSYNC
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = unix.MS_REMOUNT
+
+ // MANDLOCK will force mandatory locks on a filesystem.
+ MANDLOCK = unix.MS_MANDLOCK
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = unix.MS_NOATIME
+
+ // NODIRATIME will not update the directory access time.
+ NODIRATIME = unix.MS_NODIRATIME
+
+ // BIND remounts a subtree somewhere else.
+ BIND = unix.MS_BIND
+
+ // RBIND remounts a subtree and all possible submounts somewhere else.
+ RBIND = unix.MS_BIND | unix.MS_REC
+
+ // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+ UNBINDABLE = unix.MS_UNBINDABLE
+
+ // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
+ RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
+
+ // PRIVATE creates a mount which carries no propagation abilities.
+ PRIVATE = unix.MS_PRIVATE
+
+ // RPRIVATE marks the entire mount tree as PRIVATE.
+ RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
+
+ // SLAVE creates a mount which receives propagation from its master, but not
+ // vice versa.
+ SLAVE = unix.MS_SLAVE
+
+ // RSLAVE marks the entire mount tree as SLAVE.
+ RSLAVE = unix.MS_SLAVE | unix.MS_REC
+
+ // SHARED creates a mount which provides the ability to create mirrors of
+ // that mount such that mounts and unmounts within any of the mirrors
+ // propagate to the other mirrors.
+ SHARED = unix.MS_SHARED
+
+ // RSHARED marks the entire mount tree as SHARED.
+ RSHARED = unix.MS_SHARED | unix.MS_REC
+
+ // RELATIME updates inode access times relative to modify or change time.
+ RELATIME = unix.MS_RELATIME
+
+ // STRICTATIME allows to explicitly request full atime updates. This makes
+ // it possible for the kernel to default to relatime or noatime but still
+ // allow userspace to override it.
+ STRICTATIME = unix.MS_STRICTATIME
+
+ mntDetach = unix.MNT_DETACH
+)
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
new file mode 100644
index 000000000..9ed741e3f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
@@ -0,0 +1,31 @@
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+ mntDetach = 0
+)
diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go
new file mode 100644
index 000000000..d3caa16bd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mount.go
@@ -0,0 +1,106 @@
+package mount
+
+import (
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/containers/storage/pkg/fileutils"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+ return parseMountTable()
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint)
+ if err != nil {
+ return false, err
+ }
+ // Search the table for the mountpoint
+ for _, e := range entries {
+ if e.Mountpoint == mountpoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Mount will mount filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
+ }
+ return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+ flag, data := parseOptions(options)
+ return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount unmounts the target filesystem if it is currently mounted;
+// otherwise it is a no-op.
+func Unmount(target string) error {
+ if mounted, err := Mounted(target); err != nil || !mounted {
+ return err
+ }
+ return ForceUnmount(target)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+ mounts, err := GetMounts()
+ if err != nil {
+ return err
+ }
+
+ // Make the deepest mount be first
+ sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+ for i, m := range mounts {
+ if !strings.HasPrefix(m.Mountpoint, target) {
+ continue
+ }
+ if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+ if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+ return err
+ }
+ // Ignore errors for submounts and continue trying to unmount others
+ // The final unmount should fail if there are any submounts remaining
+ }
+ }
+ return nil
+}
+
+// ForceUnmount will force an unmount of the target filesystem, regardless of
+// whether it is mounted or not.
+func ForceUnmount(target string) (err error) {
+ // Simple retry logic for unmount
+ for i := 0; i < 10; i++ {
+ if err = unmount(target, 0); err == nil {
+ return nil
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ return err
+}
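A hedged end-to-end sketch of the package API above (the mount point is
hypothetical, must already exist as a directory, and the program must run as
root):

// +build linux

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Mount is a no-op if the target is already mounted; options use
	// fstab syntax, as documented above.
	if err := mount.Mount("tmpfs", "/mnt/scratch", "tmpfs", "size=64m,noexec"); err != nil {
		fmt.Println("mount failed:", err)
		return
	}
	defer mount.Unmount("/mnt/scratch")

	mounted, err := mount.Mounted("/mnt/scratch")
	fmt.Println(mounted, err) // true <nil> while the mount is in place
}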
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
new file mode 100644
index 000000000..814896cc9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,60 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
new file mode 100644
index 000000000..39c36d472
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
@@ -0,0 +1,57 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // ptypes is the set of propagation types.
+ ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+ // pflags is the full set of valid flags for a change propagation call.
+ pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+
+ // broflags is the combination of bind and read only
+ broflags = unix.MS_BIND | unix.MS_RDONLY
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+ switch {
+ // We treat device "" and "none" as a remount request to provide compatibility with
+ // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+ case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
+ return true
+ default:
+ return false
+ }
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+ oflags := flags &^ ptypes
+ if !isremount(device, flags) || data != "" {
+ // Initial call applying all non-propagation flags for mount
+ // or remount with changed data
+ if err := unix.Mount(device, target, mType, oflags, data); err != nil {
+ return err
+ }
+ }
+
+ if flags&ptypes != 0 {
+ // Change the propagation type.
+ if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
+ return err
+ }
+ }
+
+ if oflags&broflags == broflags {
+ // Remount the bind to apply read only.
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "")
+ }
+
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go
new file mode 100644
index 000000000..48b86771e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ spec := C.CString(device)
+ dir := C.CString(target)
+ fstype := C.CString(mType)
+ _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+ C.free(unsafe.Pointer(spec))
+ C.free(unsafe.Pointer(dir))
+ C.free(unsafe.Pointer(fstype))
+ return err
+}
+
+func unmount(target string, flag int) error {
+ err := unix.Unmount(target, flag)
+ return err
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
new file mode 100644
index 000000000..a2a3bb457
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+ panic("Not implemented")
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go
new file mode 100644
index 000000000..ff4cc1d86
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go
@@ -0,0 +1,54 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
+
+type byMountpoint []*Info
+
+func (by byMountpoint) Len() int {
+ return len(by)
+}
+
+func (by byMountpoint) Less(i, j int) bool {
+ return by[i].Mountpoint < by[j].Mountpoint
+}
+
+func (by byMountpoint) Swap(i, j int) {
+ by[i], by[j] = by[j], by[i]
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 000000000..4f32edcd9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// Parse the mount table via getmntinfo(3); FreeBSD has no
+// /proc/self/mountinfo to read.
+func parseMountTable() ([]*Info, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*Info
+ for _, entry := range entries {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
new file mode 100644
index 000000000..be69fee1d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*Info{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) < 3 {
+ return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ if optionalFields != "-" {
+ p.Optional = optionalFields
+ }
+
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+ out = append(out, p)
+ }
+ return out, nil
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
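The parsed entries surface through the exported GetMounts; a minimal listing
sketch (Linux only, reading /proc/self/mountinfo as above):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	infos, err := mount.GetMounts()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, m := range infos {
		// Fields map onto the mountinfo columns documented above.
		fmt.Printf("%s on %s type %s (%s)\n", m.Source, m.Mountpoint, m.Fstype, m.Opts)
	}
}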
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go
new file mode 100644
index 000000000..ad9ab57f8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+ "fmt"
+)
+
+func parseMountTable() ([]*Info, error) {
+ mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+ if mnttab == nil {
+ return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+ }
+
+ var out []*Info
+ var mp C.struct_mnttab
+
+ ret := C.getmntent(mnttab, &mp)
+ for ret == 0 {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+ mountinfo.Source = C.GoString(mp.mnt_special)
+ mountinfo.Fstype = C.GoString(mp.mnt_fstype)
+ mountinfo.Opts = C.GoString(mp.mnt_mntopts)
+ out = append(out, &mountinfo)
+ ret = C.getmntent(mnttab, &mp)
+ }
+
+ C.fclose(mnttab)
+ return out, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 000000000..7fbcf1921
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func parseMountTable() ([]*Info, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go
new file mode 100644
index 000000000..dab8a37ed
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+ // Do NOT return an error!
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 000000000..8ceec84bc
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ mounted, err := Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ if !mounted {
+ if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+ return err
+ }
+ }
+ if _, err = Mounted(mountPoint); err != nil {
+ return err
+ }
+
+ return ForceMount("", mountPoint, "none", options)
+}
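A usage sketch for the propagation helpers above (the path is a hypothetical
existing directory, and changing propagation requires root):

// +build linux

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Bind-mounts the path onto itself if needed, then remounts it
	// rprivate so mount events stop propagating in or out of the subtree.
	if err := mount.MakeRPrivate("/var/lib/example"); err != nil {
		fmt.Println(err)
	}
}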
diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go
new file mode 100644
index 000000000..09f6b03cb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go
@@ -0,0 +1,58 @@
+// +build solaris
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ // TODO: Solaris does not support bind mounts.
+ // Evaluate lofs and also look at the relevant
+ // mount flags to be supported.
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
new file mode 100644
index 000000000..7738fc741
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "errors"
+ "fmt"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+ Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4)
+ Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
+ Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
+ Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
+}
+
+func (k *VersionInfo) String() string {
+ return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
+}
+
+// CompareKernelVersion compares two kernel.VersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b.
+func CompareKernelVersion(a, b VersionInfo) int {
+ if a.Kernel < b.Kernel {
+ return -1
+ } else if a.Kernel > b.Kernel {
+ return 1
+ }
+
+ if a.Major < b.Major {
+ return -1
+ } else if a.Major > b.Major {
+ return 1
+ }
+
+ if a.Minor < b.Minor {
+ return -1
+ } else if a.Minor > b.Minor {
+ return 1
+ }
+
+ return 0
+}
+
+// ParseRelease parses a string and creates a VersionInfo based on it.
+func ParseRelease(release string) (*VersionInfo, error) {
+ var (
+ kernel, major, minor, parsed int
+ flavor, partial string
+ )
+
+ // Ignore error from Sscanf to allow an empty flavor. Instead, just
+ // make sure we got all the version numbers.
+ parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
+ if parsed < 2 {
+ return nil, errors.New("Can't parse kernel version " + release)
+ }
+
+ // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
+ parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
+ if parsed < 1 {
+ flavor = partial
+ }
+
+ return &VersionInfo{
+ Kernel: kernel,
+ Major: major,
+ Minor: minor,
+ Flavor: flavor,
+ }, nil
+}
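A quick sketch of ParseRelease and CompareKernelVersion with the two release
shapes mentioned in the comments (values are illustrative):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers/kernel"
)

func main() {
	a, _ := kernel.ParseRelease("4.1.2-generic")
	b, _ := kernel.ParseRelease("3.12-1-amd64") // no patch level; "-1-amd64" becomes the flavor
	fmt.Println(a, b) // 4.1.2-generic 3.12.0-1-amd64

	fmt.Println(kernel.CompareKernelVersion(*a, *b)) // 1, i.e. a > b
}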
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
new file mode 100644
index 000000000..71f205b28
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
@@ -0,0 +1,56 @@
+// +build darwin
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/mattn/go-shellwords"
+)
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+ release, err := getRelease()
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseRelease(release)
+}
+
+// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version
+func getRelease() (string, error) {
+ cmd := exec.Command("system_profiler", "SPSoftwareDataType")
+ osName, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ var release string
+ data := strings.Split(string(osName), "\n")
+ for _, line := range data {
+ if strings.Contains(line, "Kernel Version") {
+ // It has the format like ' Kernel Version: Darwin 14.5.0'
+ content := strings.SplitN(line, ":", 2)
+ if len(content) != 2 {
+ return "", fmt.Errorf("Kernel Version is invalid")
+ }
+
+ prettyNames, err := shellwords.Parse(content[1])
+ if err != nil {
+ return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error())
+ }
+
+ if len(prettyNames) != 2 {
+ return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ")
+ }
+ release = prettyNames[1]
+ }
+ }
+
+ return release, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
new file mode 100644
index 000000000..76e1e499f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
@@ -0,0 +1,45 @@
+// +build linux freebsd solaris openbsd
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "bytes"
+
+ "github.com/sirupsen/logrus"
+)
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+ uts, err := uname()
+ if err != nil {
+ return nil, err
+ }
+
+ release := make([]byte, len(uts.Release))
+
+ i := 0
+ for _, c := range uts.Release {
+ release[i] = byte(c)
+ i++
+ }
+
+ // Remove the \x00 from the release for Atoi to parse correctly
+ release = release[:bytes.IndexByte(release, 0)]
+
+ return ParseRelease(string(release))
+}
+
+// CheckKernelVersion checks if current kernel is newer than (or equal to)
+// the given version.
+func CheckKernelVersion(k, major, minor int) bool {
+ if v, err := GetKernelVersion(); err != nil {
+ logrus.Warnf("error getting kernel version: %s", err)
+ } else {
+ if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 000000000..e59867277
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,70 @@
+// +build windows
+
+package kernel
+
+import (
+ "fmt"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+ kvi string // Kernel version identifier string (the BuildLabEx registry value)
+ major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1)
+ minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601)
+ build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592)
+}
+
+func (k *VersionInfo) String() string {
+ return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+
+ var (
+ h windows.Handle
+ dwVersion uint32
+ err error
+ )
+
+ KVI := &VersionInfo{"Unknown", 0, 0, 0}
+
+ if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
+ windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`),
+ 0,
+ windows.KEY_READ,
+ &h); err != nil {
+ return KVI, err
+ }
+ defer windows.RegCloseKey(h)
+
+ var buf [1 << 10]uint16
+ var typ uint32
+ n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+ if err = windows.RegQueryValueEx(h,
+ windows.StringToUTF16Ptr("BuildLabEx"),
+ nil,
+ &typ,
+ (*byte)(unsafe.Pointer(&buf[0])),
+ &n); err != nil {
+ return KVI, err
+ }
+
+ KVI.kvi = windows.UTF16ToString(buf[:])
+
+ // Important - docker.exe MUST be manifested for this API to return
+ // the correct information.
+ if dwVersion, err = windows.GetVersion(); err != nil {
+ return KVI, err
+ }
+
+ KVI.major = int(dwVersion & 0xFF)
+ KVI.minor = int((dwVersion & 0XFF00) >> 8)
+ KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
+
+ return KVI, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
new file mode 100644
index 000000000..e913fad00
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
@@ -0,0 +1,17 @@
+package kernel
+
+import "golang.org/x/sys/unix"
+
+// Utsname represents the system name structure.
+// It is a passthrough for unix.Utsname in order to make it portable to
+// other platforms where it is not available.
+type Utsname unix.Utsname
+
+func uname() (*unix.Utsname, error) {
+ uts := &unix.Utsname{}
+
+ if err := unix.Uname(uts); err != nil {
+ return nil, err
+ }
+ return uts, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
new file mode 100644
index 000000000..49370bd3d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
@@ -0,0 +1,14 @@
+package kernel
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func uname() (*unix.Utsname, error) {
+ uts := &unix.Utsname{}
+
+ if err := unix.Uname(uts); err != nil {
+ return nil, err
+ }
+ return uts, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
new file mode 100644
index 000000000..1da3f239f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
@@ -0,0 +1,18 @@
+// +build !linux,!solaris
+
+package kernel
+
+import (
+ "errors"
+)
+
+// Utsname represents the system name structure.
+// It is defined here to make it portable as it is available on linux but not
+// on windows.
+type Utsname struct {
+ Release [65]byte
+}
+
+func uname() (*Utsname, error) {
+ return nil, errors.New("Kernel version detection is available only on linux")
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
new file mode 100644
index 000000000..acc897168
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -0,0 +1,69 @@
+// Package parsers provides helper functions to parse and validate different
+// types of string, such as hosts, unix addresses, tcp addresses, filters, and
+// kernel/operating-system versions.
+package parsers
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
+func ParseKeyValueOpt(opt string) (string, string, error) {
+ parts := strings.SplitN(opt, "=", 2)
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+ }
+ return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+}
+
+// ParseUintList parses and validates the specified string as the value
+// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be
+// one of the formats below. Note that duplicates are actually allowed in the
+// input string. It returns a `map[int]bool` with available elements from `val`
+// set to `true`.
+// Supported formats:
+// 7
+// 1-6
+// 0,3-4,7,8-10
+// 0-0,0,1-7
+// 03,1-3 <- this will be parsed as [1,2,3]
+// 3,2,1
+// 0-2,3,1
+func ParseUintList(val string) (map[int]bool, error) {
+ if val == "" {
+ return map[int]bool{}, nil
+ }
+
+ availableInts := make(map[int]bool)
+ split := strings.Split(val, ",")
+ errInvalidFormat := fmt.Errorf("invalid format: %s", val)
+
+ for _, r := range split {
+ if !strings.Contains(r, "-") {
+ v, err := strconv.Atoi(r)
+ if err != nil {
+ return nil, errInvalidFormat
+ }
+ availableInts[v] = true
+ } else {
+ split := strings.SplitN(r, "-", 2)
+ min, err := strconv.Atoi(split[0])
+ if err != nil {
+ return nil, errInvalidFormat
+ }
+ max, err := strconv.Atoi(split[1])
+ if err != nil {
+ return nil, errInvalidFormat
+ }
+ if max < min {
+ return nil, errInvalidFormat
+ }
+ for i := min; i <= max; i++ {
+ availableInts[i] = true
+ }
+ }
+ }
+ return availableInts, nil
+}
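A short sketch of the two parsers above (inputs are illustrative):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers"
)

func main() {
	k, v, err := parsers.ParseKeyValueOpt("storage.driver = overlay")
	fmt.Println(k, v, err) // storage.driver overlay <nil>

	// Singles and ranges may be mixed, as in cpuset.cpus; the resulting
	// map contains 0, 3, 4 and 7 (map print order may vary).
	set, err := parsers.ParseUintList("0,3-4,7")
	fmt.Println(set, err)
}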
diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go
new file mode 100644
index 000000000..a15e3688b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/pools/pools.go
@@ -0,0 +1,119 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/containers/storage/pkg/ioutils"
+)
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool *BufioReaderPool
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+ pool *sync.Pool
+}
+
+func init() {
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ pool := &sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ }
+ return &BufioReaderPool{pool: pool}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := BufioReader32KPool.Get(src)
+ written, err = io.Copy(dst, buf)
+ BufioReader32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+ pool *sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ pool := &sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ }
+ return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.Writecloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
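A minimal sketch showing the pooled copy helper above:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/containers/storage/pkg/pools"
)

func main() {
	var dst bytes.Buffer
	// Copy borrows a pooled 32K bufio.Reader for the duration of the copy
	// and returns it to the pool afterwards.
	n, err := pools.Copy(&dst, strings.NewReader("hello pools"))
	fmt.Println(n, err, dst.String()) // 11 <nil> hello pools
}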
diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go
new file mode 100644
index 000000000..dd52b9082
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it calls a function in a goroutine,
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error, 1)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
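A minimal usage sketch (doSlowWork is a hypothetical task, not defined in this changeset). Because the channel is buffered with capacity 1, the goroutine's send never blocks even if the caller stops waiting:

    package main

    import (
        "fmt"
        "time"

        "github.com/containers/storage/pkg/promise"
    )

    func doSlowWork() error { // hypothetical long-running task
        time.Sleep(time.Second)
        return nil
    }

    func main() {
        errCh := promise.Go(doSlowWork)
        select {
        case err := <-errCh:
            fmt.Println("finished:", err)
        case <-time.After(5 * time.Second):
            // Timed out waiting. The goroutine can still complete later
            // because the channel is buffered (capacity 1), so its send
            // never blocks even with no receiver.
            fmt.Println("timed out")
        }
    }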
diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md
new file mode 100644
index 000000000..6658f69b6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+# reexec
+
+The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
+because of the forking limitations of using Go. Handlers can be registered under a name, and
+argv[0] of the re-exec'd binary is used to find and execute custom init paths.
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
new file mode 100644
index 000000000..05319eacc
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -0,0 +1,30 @@
+// +build linux
+
+package reexec
+
+import (
+ "os/exec"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+ return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd with Path set to the current binary. It also sets
+// SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+ return &exec.Cmd{
+ Path: Self(),
+ Args: args,
+ SysProcAttr: &syscall.SysProcAttr{
+ Pdeathsig: unix.SIGTERM,
+ },
+ }
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
new file mode 100644
index 000000000..778a720e3
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -0,0 +1,23 @@
+// +build freebsd solaris darwin
+
+package reexec
+
+import (
+ "os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+ return naiveSelf()
+}
+
+// Command returns *exec.Cmd with Path set to the current binary.
+// For example, if the current binary is "docker" at "/usr/bin/", then cmd.Path
+// will be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+ return &exec.Cmd{
+ Path: Self(),
+ Args: args,
+ }
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
new file mode 100644
index 000000000..76edd8242
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package reexec
+
+import (
+ "os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func Command(args ...string) *exec.Cmd {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
new file mode 100644
index 000000000..ca871c422
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -0,0 +1,23 @@
+// +build windows
+
+package reexec
+
+import (
+ "os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+ return naiveSelf()
+}
+
+// Command returns *exec.Cmd with Path set to the current binary.
+// For example, if the current binary is "docker.exe" at "C:\", then cmd.Path
+// will be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+ return &exec.Cmd{
+ Path: Self(),
+ Args: args,
+ }
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
new file mode 100644
index 000000000..c56671d91
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name.
+func Register(name string, initializer func()) {
+ if _, exists := registeredInitializers[name]; exists {
+ panic(fmt.Sprintf("reexec func already registered under name %q", name))
+ }
+
+ registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
+func Init() bool {
+ initializer, exists := registeredInitializers[os.Args[0]]
+ if exists {
+ initializer()
+
+ return true
+ }
+ return false
+}
+
+func naiveSelf() string {
+ name := os.Args[0]
+ if filepath.Base(name) == name {
+ if lp, err := exec.LookPath(name); err == nil {
+ return lp
+ }
+ }
+ // handle conversion of relative paths to absolute
+ if absName, err := filepath.Abs(name); err == nil {
+ return absName
+ }
+ // if we couldn't get absolute name, return original
+ // (NOTE: Go only errors on Abs() if os.Getwd fails)
+ return name
+}
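A minimal sketch of the registration flow; the handler name "my-init" is illustrative, not one registered by this changeset:

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/reexec"
    )

    func init() {
        reexec.Register("my-init", func() {
            fmt.Println("running re-exec'd initializer")
            os.Exit(0)
        })
    }

    func main() {
        // If argv[0] matches a registered name, run that initializer
        // instead of normal startup.
        if reexec.Init() {
            return
        }

        // Re-exec ourselves with argv[0] set to the handler name.
        cmd := reexec.Command("my-init")
        cmd.Stdout = os.Stdout
        if err := cmd.Run(); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }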
diff --git a/vendor/github.com/containers/storage/pkg/stringid/README.md b/vendor/github.com/containers/storage/pkg/stringid/README.md
new file mode 100644
index 000000000..37a5098fd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers.
diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
new file mode 100644
index 000000000..a0c7c42a0
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
@@ -0,0 +1,99 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid
+
+import (
+ cryptorand "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const shortLen = 12
+
+var (
+ validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
+ validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
+)
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+ return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+ if i := strings.IndexRune(id, ':'); i >= 0 {
+ id = id[i+1:]
+ }
+ if len(id) > shortLen {
+ id = id[:shortLen]
+ }
+ return id
+}
+
+func generateID(r io.Reader) string {
+ b := make([]byte, 32)
+ for {
+ if _, err := io.ReadFull(r, b); err != nil {
+ panic(err) // This shouldn't happen
+ }
+ id := hex.EncodeToString(b)
+		// if we try to parse the truncated form as an int and we don't get
+		// an error, then the value is all numeric and causes issues when
+		// used as a hostname. ref #3869
+ if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+ continue
+ }
+ return id
+ }
+}
+
+// GenerateRandomID returns a unique ID.
+func GenerateRandomID() string {
+ return generateID(cryptorand.Reader)
+}
+
+// GenerateNonCryptoID generates a unique ID without using cryptographically
+// secure sources of randomness.
+// It helps conserve entropy.
+func GenerateNonCryptoID() string {
+ return generateID(readerFunc(rand.Read))
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return fmt.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+func init() {
+ // safely set the seed globally so we generate random ids. Tries to use a
+ // crypto seed before falling back to time.
+ var seed int64
+ if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+ // This should not happen, but worst-case fallback to time-based seed.
+ seed = time.Now().UnixNano()
+ } else {
+ seed = cryptoseed.Int64()
+ }
+
+ rand.Seed(seed)
+}
+
+type readerFunc func(p []byte) (int, error)
+
+func (fn readerFunc) Read(p []byte) (int, error) {
+ return fn(p)
+}
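For illustration, a small sketch of the package's typical flow:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/stringid"
    )

    func main() {
        id := stringid.GenerateRandomID() // 64 hex characters
        short := stringid.TruncateID(id)  // first 12 characters

        fmt.Println(len(id), len(short))       // 64 12
        fmt.Println(stringid.IsShortID(short)) // true
        fmt.Println(stringid.ValidateID(id))   // <nil>
    }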
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go
new file mode 100644
index 000000000..056d19954
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+ unixMaxTime := maxTime
+
+	// If the access or modified time is prior to the Unix epoch, or after
+	// the end of Unix time, os.Chtimes has undefined behavior; default to
+	// the Unix epoch in that case, just to be safe.
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ // Take platform specific action for setting create time.
+ if err := setCTime(name, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
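A short sketch of the clamping behavior (the file path is hypothetical): a time before the Unix epoch is silently replaced with the epoch before os.Chtimes is called:

    package main

    import (
        "time"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        // Assumes a file at /tmp/example exists (hypothetical path).
        tooEarly := time.Unix(0, 0).Add(-time.Hour) // before the Unix epoch
        now := time.Now()

        // Chtimes clamps tooEarly to time.Unix(0, 0) rather than handing
        // an out-of-range value to os.Chtimes.
        if err := system.Chtimes("/tmp/example", tooEarly, now); err != nil {
            panic(err)
        }
    }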
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
new file mode 100644
index 000000000..09d58bcbf
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+ "time"
+)
+
+// setCTime will set the create time on a file. On Unix, the create
+// time is updated as a side effect of setting the modified time, so
+// no action is required.
+func setCTime(path string, ctime time.Time) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
new file mode 100644
index 000000000..45428c141
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package system
+
+import (
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// setCTime will set the create time on a file. On Windows, this requires
+// calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+ ctimespec := windows.NsecToTimespec(ctime.UnixNano())
+ pathp, e := windows.UTF16PtrFromString(path)
+ if e != nil {
+ return e
+ }
+ h, e := windows.CreateFile(pathp,
+ windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+ windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if e != nil {
+ return e
+ }
+ defer windows.Close(h)
+ c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
+ return windows.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go
new file mode 100644
index 000000000..288318985
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNotSupportedPlatform means the platform is not supported.
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go
new file mode 100644
index 000000000..60f0514b1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/exitcode.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "fmt"
+ "os/exec"
+ "syscall"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError; otherwise it returns 0 and an error.
+func GetExitCode(err error) (int, error) {
+ exitCode := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return procExit.ExitStatus(), nil
+ }
+ }
+ return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode processes the specified error and returns the exit status
+// code if the error was of type exec.ExitError; it returns 0 for a nil error
+// and 127 when the exit code cannot be determined.
+func ProcessExitCode(err error) (exitCode int) {
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
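A usage sketch; on Unix shells `exit 3` produces an exec.ExitError carrying status 3:

    package main

    import (
        "fmt"
        "os/exec"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        err := exec.Command("sh", "-c", "exit 3").Run()

        // Prints 3 here; 0 for a nil error; 127 if the exit code
        // cannot be determined.
        fmt.Println(system.ProcessExitCode(err))
    }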
diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go
new file mode 100644
index 000000000..102565f76
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/filesys.go
@@ -0,0 +1,67 @@
+// +build !windows
+
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return MkdirAll(path, perm, sddl)
+}
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permission specified by attribute perm for all directories created.
+func MkdirAll(path string, perm os.FileMode, sddl string) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
+
+// The functions below here are wrappers for the equivalents in the os and ioutils packages.
+// They are passthrough on Unix platforms, and only relevant on Windows.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return os.Create(name)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return os.Open(name)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
+ return os.OpenFile(name, flag, perm)
+}
+
+// TempFileSequential creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ return ioutil.TempFile(dir, prefix)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go
new file mode 100644
index 000000000..a61b53d0b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go
@@ -0,0 +1,298 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ winio "github.com/Microsoft/go-winio"
+ "golang.org/x/sys/windows"
+)
+
+const (
+ // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+ SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+ // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
+ SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// with an appropriate SDDL defined ACL.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return mkdirall(path, true, sddl)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+ return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is largely copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = mkdirall(path[0:j-1], false, sddl)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+ if applyACL {
+ err = mkdirWithACL(path, sddl)
+ } else {
+ err = os.Mkdir(path, 0)
+ }
+
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+ sa := windows.SecurityAttributes{Length: 0}
+ sd, err := winio.SddlToSecurityDescriptor(sddl)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+ namep, err := windows.UTF16PtrFromString(name)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+
+ e := windows.CreateDirectory(namep, &sa)
+ if e != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: e}
+ }
+ return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being
+// processed by the daemon). This SHOULD be treated as absolute from a docker
+// processing perspective.
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
+
+// The functions below originate from the golang os and windows packages,
+// slightly modified to cope only with files, not directories, due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (in particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
+ if name == "" {
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
+ }
+ r, errf := windowsOpenFileSequential(name, flag, 0)
+ if errf == nil {
+ return r, nil
+ }
+ return nil, &os.PathError{Op: "open", Path: name, Err: errf}
+}
+
+func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
+ r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
+
+func makeInheritSa() *windows.SecurityAttributes {
+ var sa windows.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
+ if len(path) == 0 {
+ return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := windows.UTF16PtrFromString(path)
+ if err != nil {
+ return windows.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
+ case windows.O_RDONLY:
+ access = windows.GENERIC_READ
+ case windows.O_WRONLY:
+ access = windows.GENERIC_WRITE
+ case windows.O_RDWR:
+ access = windows.GENERIC_READ | windows.GENERIC_WRITE
+ }
+ if mode&windows.O_CREAT != 0 {
+ access |= windows.GENERIC_WRITE
+ }
+ if mode&windows.O_APPEND != 0 {
+ access &^= windows.GENERIC_WRITE
+ access |= windows.FILE_APPEND_DATA
+ }
+ sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
+ var sa *windows.SecurityAttributes
+ if mode&windows.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
+ createmode = windows.CREATE_NEW
+ case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
+ createmode = windows.CREATE_ALWAYS
+ case mode&windows.O_CREAT == windows.O_CREAT:
+ createmode = windows.OPEN_ALWAYS
+ case mode&windows.O_TRUNC == windows.O_TRUNC:
+ createmode = windows.TRUNCATE_EXISTING
+ default:
+ createmode = windows.OPEN_EXISTING
+ }
+	// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+ const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+ h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
+ return h, e
+}
+
+// Helpers for TempFileSequential
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+func nextSuffix() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
+// file access. Below is the original comment from golang:
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextSuffix())
+ f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go
new file mode 100644
index 000000000..17935088d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/init.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// Used by chtimes
+var maxTime time.Time
+
+func init() {
+ // chtimes initialization
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go
new file mode 100644
index 000000000..019c66441
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go
@@ -0,0 +1,17 @@
+package system
+
+import "os"
+
+// LCOWSupported determines if Linux Containers on Windows are supported.
+// Note: This feature is in development (06/17) and enabled through an
+// environment variable. At a future time, it will be enabled based
+// on build number. @jhowardmsft
+var lcowSupported = false
+
+func init() {
+ // LCOW initialization
+ if os.Getenv("LCOW_SUPPORTED") != "" {
+ lcowSupported = true
+ }
+
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
new file mode 100644
index 000000000..cff33bb40
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go
new file mode 100644
index 000000000..e54d01e69
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go
@@ -0,0 +1,6 @@
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return lcowSupported
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
new file mode 100644
index 000000000..bd23c4d50
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Lstat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go
new file mode 100644
index 000000000..e51df0daf
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go
@@ -0,0 +1,14 @@
+package system
+
+import "os"
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+func Lstat(path string) (*StatT, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return fromStatT(&fi)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go
new file mode 100644
index 000000000..3b6e947e6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
new file mode 100644
index 000000000..385f1d5e7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Returns an error if there are problems reading from the file.
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
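A usage sketch; all fields are reported in bytes, since parseMemInfo multiplies the kB values from /proc/meminfo by units.KiB:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        mi, err := system.ReadMemInfo()
        if err != nil {
            panic(err)
        }
        fmt.Printf("mem %d/%d bytes free, swap %d/%d bytes free\n",
            mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
    }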
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
new file mode 100644
index 000000000..925776e78
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem gets the system memory info using sysconf, the same as prtconf.
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+ ppKernel := C.getPpKernel()
+ MemTotal := getTotalMem()
+ MemFree := getFreeMem()
+ SwapTotal, SwapFree, err := getSysSwap()
+
+ if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+ SwapFree < 0 {
+		return nil, fmt.Errorf("error getting system memory info: %v", err)
+ }
+
+ meminfo := &MemInfo{}
+	// Total memory is total physical memory less the memory locked by the kernel
+ meminfo.MemTotal = MemTotal - int64(ppKernel)
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+ var tSwap int64
+ var fSwap int64
+ var diskblksPerPage int64
+ num, err := C.swapctl(C.SC_GETNSWP, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ st := C.allocSwaptable(num)
+ _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+ if err != nil {
+ C.freeSwaptable(st)
+ return -1, -1, err
+ }
+
+ diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+ for i := 0; i < int(num); i++ {
+ swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+ tSwap += int64(swapent.ste_pages) * diskblksPerPage
+ fSwap += int64(swapent.ste_free) * diskblksPerPage
+ }
+ C.freeSwaptable(st)
+ return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
new file mode 100644
index 000000000..3ce019dff
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
new file mode 100644
index 000000000..883944a4c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
@@ -0,0 +1,45 @@
+package system
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
+ dwLength: 64,
+ }
+ r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+ if r1 == 0 {
+ return &MemInfo{}, nil
+ }
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go
new file mode 100644
index 000000000..af79a6538
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
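A worked example of the bit layout Mkdev describes: /dev/null on Linux is major 1, minor 3, so the packed value is (1 << 8) | 3 = 0x103. The device path below is hypothetical, and creating device nodes normally requires root:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/system"
        "golang.org/x/sys/unix"
    )

    func main() {
        dev := system.Mkdev(1, 3) // major 1, minor 3, as for /dev/null
        fmt.Printf("%#x\n", dev)  // 0x103

        // S_IFCHR marks the node as a character device; path is illustrative.
        if err := system.Mknod("/tmp/mynull", unix.S_IFCHR|0666, int(dev)); err != nil {
            fmt.Println("mknod failed (usually needs root):", err)
        }
    }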
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
new file mode 100644
index 000000000..2e863c021
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go
new file mode 100644
index 000000000..f634a6be6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/path.go
@@ -0,0 +1,21 @@
+package system
+
+import "runtime"
+
+const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// DefaultPathEnv is the unix-style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+func DefaultPathEnv(platform string) string {
+ if runtime.GOOS == "windows" {
+ if platform != runtime.GOOS && LCOWSupported() {
+ return defaultUnixPathEnv
+ }
+ // Deliberately empty on Windows containers on Windows as the default path will be set by
+ // the container. Docker has no context of what the default path should be.
+ return ""
+ }
+ return defaultUnixPathEnv
+
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go
new file mode 100644
index 000000000..f3762e69d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package system
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on platforms other than Windows.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go
new file mode 100644
index 000000000..aab891522
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package system
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (in other words, / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go
new file mode 100644
index 000000000..26c8b42c1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go
@@ -0,0 +1,24 @@
+// +build linux freebsd solaris darwin
+
+package system
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsProcessAlive returns true if the process with the given pid is running.
+func IsProcessAlive(pid int) bool {
+ err := unix.Kill(pid, syscall.Signal(0))
+ if err == nil || err == unix.EPERM {
+ return true
+ }
+
+ return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+ unix.Kill(pid, unix.SIGKILL)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
new file mode 100644
index 000000000..fc03c3e6b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -0,0 +1,80 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/containers/storage/pkg/mount"
+ "github.com/pkg/errors"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned since it's ok for the dir
+// to be gone; we can just retry the remove operation.
+//
+// This should not return an `os.ErrNotExist` kind of error under any circumstances.
+func EnsureRemoveAll(dir string) error {
+ notExistErr := make(map[string]bool)
+
+ // track retries
+ exitOnErr := make(map[string]int)
+ maxRetry := 5
+
+ // Attempt to unmount anything beneath this dir first
+ mount.RecursiveUnmount(dir)
+
+ for {
+ err := os.RemoveAll(dir)
+ if err == nil {
+ return err
+ }
+
+ pe, ok := err.(*os.PathError)
+ if !ok {
+ return err
+ }
+
+ if os.IsNotExist(err) {
+ if notExistErr[pe.Path] {
+ return err
+ }
+ notExistErr[pe.Path] = true
+
+		// There is a race where a subdir can be removed after the parent
+		// dir entries have already been read.
+ // So the path could be from `os.Remove(subdir)`
+ // If the reported non-existent path is not the passed in `dir` we
+ // should just retry, but otherwise return with no error.
+ if pe.Path == dir {
+ return nil
+ }
+ continue
+ }
+
+ if pe.Err != syscall.EBUSY {
+ return err
+ }
+
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ if e := mount.Unmount(pe.Path); e != nil {
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ return errors.Wrapf(e, "error while removing %s", dir)
+ }
+ }
+ }
+
+ if exitOnErr[pe.Path] == maxRetry {
+ return err
+ }
+ exitOnErr[pe.Path]++
+ time.Sleep(100 * time.Millisecond)
+ }
+}
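Usage is identical to os.RemoveAll; a short sketch (the directory path is hypothetical):

    package main

    import (
        "github.com/containers/storage/pkg/system"
    )

    func main() {
        // Unlike os.RemoveAll, this first recursively unmounts anything
        // beneath the directory and retries on EBUSY, which suits
        // container layer directories that may still hold mounts.
        if err := system.EnsureRemoveAll("/var/lib/example/layer"); err != nil {
            panic(err)
        }
    }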
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
new file mode 100644
index 000000000..715f05b93
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
new file mode 100644
index 000000000..715f05b93
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
new file mode 100644
index 000000000..1939f9518
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
@@ -0,0 +1,19 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT converts a syscall.Stat_t type to a system.StatT type
+// This is exposed on Linux as pkg/archive/changes uses it.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
new file mode 100644
index 000000000..b607dea94
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
new file mode 100644
index 000000000..b607dea94
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
new file mode 100644
index 000000000..91c7d121c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns the user ID of the file's owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns the group ID of the file's owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns file's device ID (if it's a special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
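A usage sketch of the accessors (Unix only; the path is arbitrary):

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        st, err := system.Stat("/etc/hosts")
        if err != nil {
            panic(err)
        }
        fmt.Printf("size=%d uid=%d gid=%d mode=%o\n",
            st.Size(), st.UID(), st.GID(), st.Mode())
    }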
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
new file mode 100644
index 000000000..6c6397268
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
@@ -0,0 +1,49 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, size, etc about a file.
+type StatT struct {
+ mode os.FileMode
+ size int64
+ mtim time.Time
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return os.FileMode(s.mode)
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() time.Time {
+ return time.Time(s.mtim)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+ return fromStatT(&fi)
+}
+
+// fromStatT converts an os.FileInfo type to a system.StatT type
+func fromStatT(fi *os.FileInfo) (*StatT, error) {
+ return &StatT{
+ size: (*fi).Size(),
+ mode: (*fi).Mode(),
+ mtim: (*fi).ModTime()}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
new file mode 100644
index 000000000..49dbdd378
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "golang.org/x/sys/unix"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+ return unix.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element of the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ return []string{commandLine}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
new file mode 100644
index 000000000..23e9b207c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
@@ -0,0 +1,122 @@
+package system
+
+import (
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+ procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+ procGetProductInfo = modkernel32.NewProc("GetProductInfo")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+ OSVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformID uint32
+ CSDVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ Reserve byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+ var err error
+ osv := OSVersion{}
+ osv.Version, err = windows.GetVersion()
+ if err != nil {
+ // GetVersion never fails.
+ panic(err)
+ }
+ osv.MajorVersion = uint8(osv.Version & 0xFF)
+ osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+ osv.Build = uint16(osv.Version >> 16)
+ return osv
+}
+
+// IsWindowsClient returns true if the SKU is client
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsWindowsClient() bool {
+ osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+ r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+ if r1 == 0 {
+ logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+ return false
+ }
+ const verNTWorkstation = 0x00000001
+ return osviex.ProductType == verNTWorkstation
+}
+
+// IsIoTCore returns true if the currently running image is based off of
+// Windows 10 IoT Core.
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsIoTCore() bool {
+ var returnedProductType uint32
+ r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType)))
+ if r1 == 0 {
+ logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err)
+ return false
+ }
+ const productIoTUAP = 0x0000007B
+ const productIoTUAPCommercial = 0x00000083
+ return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. Not supported on Windows.
+func Unmount(dest string) error {
+ return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ var argc int32
+
+ argsPtr, err := windows.UTF16PtrFromString(commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ argv, err := windows.CommandLineToArgv(argsPtr, &argc)
+ if err != nil {
+ return nil, err
+ }
+ defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv))))
+
+ newArgs := make([]string, argc)
+ for i, v := range (*argv)[:argc] {
+ newArgs[i] = string(windows.UTF16ToString((*v)[:]))
+ }
+
+ return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+ // For now, check for ntuser API support on the host. In the future, a host
+ // may support win32k in containers even if the host does not support ntuser
+ // APIs.
+ return ntuserApiset.Load() == nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go
new file mode 100644
index 000000000..5a10eda5a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+ return unix.Umask(newmask), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
new file mode 100644
index 000000000..13f1de176
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
new file mode 100644
index 000000000..6a7752437
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change the access and modification times of the specified path.
+// It's used for symbolic links because unix.UtimesNano doesn't currently support a NOFOLLOW flag.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
new file mode 100644
index 000000000..edc588a63
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
@@ -0,0 +1,25 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change the access and modification times of the specified path.
+// It's used for symbolic links because unix.UtimesNano doesn't currently support a NOFOLLOW flag.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ atFdCwd := unix.AT_FDCWD
+
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
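+
+// Example (editor's sketch, not part of the vendored file): setting a
+// symlink's own access and modification times to the same nanosecond
+// timestamp, without following the link. The name touchSymlink is
+// hypothetical.
+func touchSymlink(path string, nsec int64) error {
+ ts := []syscall.Timespec{
+ syscall.NsecToTimespec(nsec), // access time
+ syscall.NsecToTimespec(nsec), // modification time
+ }
+ return LUtimesNano(path, ts)
+}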
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
new file mode 100644
index 000000000..139714544
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on Linux and FreeBSD.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000..98b111be4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+ if errno == unix.ENODATA {
+ return nil, nil
+ }
+ if errno == unix.ERANGE {
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ }
+ if errno != nil {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return unix.Lsetxattr(path, attr, data, flags)
+}
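+
+// Example (editor's sketch, not part of the vendored file): copying a single
+// extended attribute from src to dst, where an unset attribute is a no-op
+// thanks to Lgetxattr's nil/nil contract. The name copyOneXattr is
+// hypothetical.
+func copyOneXattr(src, dst, attr string) error {
+ value, err := Lgetxattr(src, attr)
+ if err != nil {
+ return err
+ }
+ if value == nil {
+ // Attribute is not set on src; nothing to copy.
+ return nil
+ }
+ return Lsetxattr(dst, attr, value, 0)
+}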
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
new file mode 100644
index 000000000..0114f2227
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than Linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than Linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
new file mode 100644
index 000000000..02610b8b7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
@@ -0,0 +1,137 @@
+// Package truncindex provides a general 'index tree', used by Docker
+// to reference containers by only a few unambiguous characters of
+// their ID.
+package truncindex
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/tchap/go-patricia/patricia"
+)
+
+var (
+ // ErrEmptyPrefix is an error returned if the prefix was empty.
+ ErrEmptyPrefix = errors.New("Prefix can't be empty")
+
+ // ErrIllegalChar is returned when a space is present in the ID.
+ ErrIllegalChar = errors.New("illegal character: ' '")
+
+ // ErrNotExist is returned when an ID or its prefix is not found in the index.
+ ErrNotExist = errors.New("ID does not exist")
+)
+
+// ErrAmbiguousPrefix is returned if the prefix was ambiguous
+// (multiple IDs match the prefix).
+type ErrAmbiguousPrefix struct {
+ prefix string
+}
+
+func (e ErrAmbiguousPrefix) Error() string {
+ return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+ sync.RWMutex
+ trie *patricia.Trie
+ ids map[string]struct{}
+}
+
+// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+ idx = &TruncIndex{
+ ids: make(map[string]struct{}),
+
+ // Change patricia's max prefix-per-node length,
+ // because our IDs are always 64 characters long.
+ trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+ }
+ for _, id := range ids {
+ idx.addID(id)
+ }
+ return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+ if strings.Contains(id, " ") {
+ return ErrIllegalChar
+ }
+ if id == "" {
+ return ErrEmptyPrefix
+ }
+ if _, exists := idx.ids[id]; exists {
+ return fmt.Errorf("id already exists: '%s'", id)
+ }
+ idx.ids[id] = struct{}{}
+ if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+ return fmt.Errorf("failed to insert id: %s", id)
+ }
+ return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ if err := idx.addID(id); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Delete removes an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Delete(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ if _, exists := idx.ids[id]; !exists || id == "" {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ delete(idx.ids, id)
+ if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Get(s string) (string, error) {
+ if s == "" {
+ return "", ErrEmptyPrefix
+ }
+ var id string
+ subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+ if id != "" {
+ // a second match makes the prefix ambiguous, so give up
+ id = ""
+ return ErrAmbiguousPrefix{prefix: string(prefix)}
+ }
+ id = string(prefix)
+ return nil
+ }
+
+ idx.RLock()
+ defer idx.RUnlock()
+ if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+ return "", err
+ }
+ if id != "" {
+ return id, nil
+ }
+ return "", ErrNotExist
+}
+
+// Iterate iterates over all stored IDs, and passes each of them to the given handler.
+func (idx *TruncIndex) Iterate(handler func(id string)) {
+ idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+ handler(string(prefix))
+ return nil
+ })
+}
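+
+// Example (editor's sketch, not part of the vendored file): resolving a short
+// prefix back to a full 64-character ID. With a single stored ID, any
+// non-empty prefix of it is unambiguous. The name exampleResolve is
+// hypothetical.
+func exampleResolve() (string, error) {
+ idx := NewTruncIndex([]string{
+ "56f0efc7410797b53ae8d2ee30e24b2f3b9e720315ef81ab0e2bca2b22a0b0f4",
+ })
+ return idx.Get("56f0") // resolves to the full ID above
+}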
diff --git a/vendor/github.com/containers/storage/stat_mtim.go b/vendor/github.com/containers/storage/stat_mtim.go
new file mode 100644
index 000000000..84d34dce3
--- /dev/null
+++ b/vendor/github.com/containers/storage/stat_mtim.go
@@ -0,0 +1,11 @@
+// +build linux solaris
+
+package storage
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
+ return st.Mtim.Unix()
+}
diff --git a/vendor/github.com/containers/storage/stat_mtimespec.go b/vendor/github.com/containers/storage/stat_mtimespec.go
new file mode 100644
index 000000000..f55ed434b
--- /dev/null
+++ b/vendor/github.com/containers/storage/stat_mtimespec.go
@@ -0,0 +1,11 @@
+// +build !linux,!solaris
+
+package storage
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
+ return st.Mtimespec.Unix()
+}
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
new file mode 100644
index 000000000..f7e3dc347
--- /dev/null
+++ b/vendor/github.com/containers/storage/store.go
@@ -0,0 +1,2390 @@
+package storage
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ // register all of the built-in drivers
+ _ "github.com/containers/storage/drivers/register"
+
+ "github.com/BurntSushi/toml"
+ drivers "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/stringid"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+var (
+ // DefaultStoreOptions is a reasonable default set of options.
+ DefaultStoreOptions StoreOptions
+ stores []*store
+ storesLock sync.Mutex
+)
+
+// ROFileBasedStore wraps up the methods of the various types of file-based
+// data stores that we implement; these methods are needed for both read-only
+// and read-write files.
+type ROFileBasedStore interface {
+ Locker
+
+ // Load reloads the contents of the store from disk. It should be called
+ // with the lock held.
+ Load() error
+}
+
+// RWFileBasedStore wraps up the methods of various types of file-based data
+// stores that we implement using read-write files.
+type RWFileBasedStore interface {
+ // Save saves the contents of the store to disk. It should be called with
+ // the lock held, and Touch() should be called afterward before releasing the
+ // lock.
+ Save() error
+}
+
+// FileBasedStore wraps up the common methods of various types of file-based
+// data stores that we implement.
+type FileBasedStore interface {
+ ROFileBasedStore
+ RWFileBasedStore
+}
+
+// ROMetadataStore wraps a method for reading metadata associated with an ID.
+type ROMetadataStore interface {
+ // Metadata reads metadata associated with an item with the specified ID.
+ Metadata(id string) (string, error)
+}
+
+// RWMetadataStore wraps a method for setting metadata associated with an ID.
+type RWMetadataStore interface {
+ // SetMetadata updates the metadata associated with the item with the specified ID.
+ SetMetadata(id, metadata string) error
+}
+
+// MetadataStore wraps up methods for getting and setting metadata associated with IDs.
+type MetadataStore interface {
+ ROMetadataStore
+ RWMetadataStore
+}
+
+// An ROBigDataStore wraps up the read-only big-data related methods of the
+// various types of file-based lookaside stores that we implement.
+type ROBigDataStore interface {
+ // BigData retrieves a (potentially large) piece of data associated with
+ // this ID, if it has previously been set.
+ BigData(id, key string) ([]byte, error)
+
+ // BigDataSize retrieves the size of a (potentially large) piece of
+ // data associated with this ID, if it has previously been set.
+ BigDataSize(id, key string) (int64, error)
+
+ // BigDataDigest retrieves the digest of a (potentially large) piece of
+ // data associated with this ID, if it has previously been set.
+ BigDataDigest(id, key string) (digest.Digest, error)
+
+ // BigDataNames returns a list of the names of previously-stored pieces of
+ // data.
+ BigDataNames(id string) ([]string, error)
+}
+
+// A RWBigDataStore wraps up the read-write big-data related methods of the
+// various types of file-based lookaside stores that we implement.
+type RWBigDataStore interface {
+ // SetBigData stores a (potentially large) piece of data associated with this
+ // ID.
+ SetBigData(id, key string, data []byte) error
+}
+
+// A BigDataStore wraps up the most common big-data related methods of the
+// various types of file-based lookaside stores that we implement.
+type BigDataStore interface {
+ ROBigDataStore
+ RWBigDataStore
+}
+
+// A FlaggableStore can have flags set and cleared on items which it manages.
+type FlaggableStore interface {
+ // ClearFlag removes a named flag from an item in the store.
+ ClearFlag(id string, flag string) error
+
+ // SetFlag sets a named flag and its value on an item in the store.
+ SetFlag(id string, flag string, value interface{}) error
+}
+
+// StoreOptions is used for passing initialization options to GetStore(), for
+// initializing a Store object and the underlying storage that it controls.
+type StoreOptions struct {
+ // RunRoot is the filesystem path under which we can store run-time
+ // information, such as the locations of active mount points, that we
+ // want to lose if the host is rebooted.
+ RunRoot string `json:"runroot,omitempty"`
+ // GraphRoot is the filesystem path under which we will store the
+ // contents of layers, images, and containers.
+ GraphRoot string `json:"root,omitempty"`
+ // GraphDriverName is the underlying storage driver that we'll be
+ // using. It only needs to be specified the first time a Store is
+ // initialized for a given RunRoot and GraphRoot.
+ GraphDriverName string `json:"driver,omitempty"`
+ // GraphDriverOptions are driver-specific options.
+ GraphDriverOptions []string `json:"driver-options,omitempty"`
+ // UIDMap and GIDMap are used mainly for deciding on the ownership of
+ // files in layers as they're stored on disk, which is often necessary
+ // when user namespaces are being used.
+ UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
+ GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
+}
+
+// Store wraps up the various types of file-based stores that we use into a
+// singleton object that initializes and manages them all together.
+type Store interface {
+ // RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve
+ // settings that were passed to GetStore() when the object was created.
+ RunRoot() string
+ GraphRoot() string
+ GraphDriverName() string
+ GraphOptions() []string
+
+ // GraphDriver obtains and returns a handle to the graph Driver object used
+ // by the Store.
+ GraphDriver() (drivers.Driver, error)
+
+ // CreateLayer creates a new layer in the underlying storage driver,
+ // optionally having the specified ID (one will be assigned if none is
+ // specified), with the specified layer (or no layer) as its parent,
+ // and with optional names. (The writeable flag is ignored.)
+ CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error)
+
+ // PutLayer combines the functions of CreateLayer and ApplyDiff,
+ // marking the layer for automatic removal if applying the diff fails
+ // for any reason.
+ //
+ // Note that we do some of this work in a child process. The calling
+ // process's main() function needs to import our pkg/reexec package and
+ // should begin with something like this in order to allow us to
+ // properly start that child process:
+ // if reexec.Init() {
+ // return
+ // }
+ PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error)
+
+ // CreateImage creates a new image, optionally with the specified ID
+ // (one will be assigned if none is specified), with optional names,
+ // referring to a specified layer, and with optional metadata. An
+ // image is a record which associates the ID of a layer with
+ // additional bookkeeping information which the library stores for the
+ // convenience of its caller.
+ CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
+
+ // CreateContainer creates a new container, optionally with the
+ // specified ID (one will be assigned if none is specified), with
+ // optional names, using the specified image's top layer as the basis
+ // for the container's layer, and assigning the specified ID to that
+ // layer (one will be created if none is specified). A container is a
+ // layer which is associated with additional bookkeeping information
+ // which the library stores for the convenience of its caller.
+ CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
+
+ // Metadata retrieves the metadata which is associated with a layer,
+ // image, or container (whichever the passed-in ID refers to).
+ Metadata(id string) (string, error)
+
+ // SetMetadata updates the metadata which is associated with a layer,
+ // image, or container (whichever the passed-in ID refers to) to match
+ // the specified value. The metadata value can be retrieved at any
+ // time using Metadata, or using Layer, Image, or Container and reading
+ // the object directly.
+ SetMetadata(id, metadata string) error
+
+ // Exists checks if there is a layer, image, or container which has the
+ // passed-in ID or name.
+ Exists(id string) bool
+
+ // Status asks for a status report, in the form of key-value pairs,
+ // from the underlying storage driver. The contents vary from driver
+ // to driver.
+ Status() ([][2]string, error)
+
+ // Delete removes the layer, image, or container which has the
+ // passed-in ID or name. Note that no safety checks are performed, so
+ // this can leave images with references to layers which do not exist,
+ // and layers with references to parents which no longer exist.
+ Delete(id string) error
+
+ // DeleteLayer attempts to remove the specified layer. If the layer is the
+ // parent of any other layer, or is referred to by any images, it will return
+ // an error.
+ DeleteLayer(id string) error
+
+ // DeleteImage removes the specified image if it is not referred to by
+ // any containers. If its top layer is then no longer referred to by
+ // any other images and is not the parent of any other layers, its top
+ // layer will be removed. If that layer's parent is no longer referred
+ // to by any other images and is not the parent of any other layers,
+ // then it, too, will be removed. This procedure will be repeated
+ // until a layer which should not be removed, or the base layer, is
+ // reached, at which point the list of removed layers is returned. If
+ // the commit argument is false, the image and layers are not removed,
+ // but the list of layers which would be removed is still returned.
+ DeleteImage(id string, commit bool) (layers []string, err error)
+
+ // DeleteContainer removes the specified container and its layer. If
+ // there is no matching container, or if the container exists but its
+ // layer does not, an error will be returned.
+ DeleteContainer(id string) error
+
+ // Wipe removes all known layers, images, and containers.
+ Wipe() error
+
+ // Mount attempts to mount a layer, image, or container for access, and
+ // returns the pathname if it succeeds.
+ //
+ // Note that we do some of this work in a child process. The calling
+ // process's main() function needs to import our pkg/reexec package and
+ // should begin with something like this in order to allow us to
+ // properly start that child process:
+ // if reexec.Init() {
+ // return
+ // }
+ Mount(id, mountLabel string) (string, error)
+
+ // Unmount attempts to unmount a layer, image, or container, given an ID, a
+ // name, or a mount path.
+ Unmount(id string) error
+
+ // Changes returns a summary of the changes which would need to be made
+ // to one layer to make its contents the same as a second layer. If
+ // the first layer is not specified, the second layer's parent is
+ // assumed. Each Change structure contains a Path relative to the
+ // layer's root directory, and a Kind which is either ChangeAdd,
+ // ChangeModify, or ChangeDelete.
+ Changes(from, to string) ([]archive.Change, error)
+
+ // DiffSize returns a count of the size of the tarstream which would
+ // specify the changes returned by Changes.
+ DiffSize(from, to string) (int64, error)
+
+ // Diff returns the tarstream which would specify the changes returned
+ // by Changes. If options are passed in, they can override default
+ // behaviors.
+ Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
+
+ // ApplyDiff applies a tarstream to a layer. Information about the
+ // tarstream is cached with the layer. Typically, a layer which is
+ // populated using a tarstream will be expected to not be modified in
+ // any other way, either before or after the diff is applied.
+ //
+ // Note that we do some of this work in a child process. The calling
+ // process's main() function needs to import our pkg/reexec package and
+ // should begin with something like this in order to allow us to
+ // properly start that child process:
+ // if reexec.Init() {
+ // return
+ // }
+ ApplyDiff(to string, diff io.Reader) (int64, error)
+
+ // LayersByCompressedDigest returns a slice of the layers with the
+ // specified compressed digest value recorded for them.
+ LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
+
+ // LayersByUncompressedDigest returns a slice of the layers with the
+ // specified uncompressed digest value recorded for them.
+ LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+
+ // LayerSize returns a cached approximation of the layer's size, or -1
+ // if we don't have a value on hand.
+ LayerSize(id string) (int64, error)
+
+ // Layers returns a list of the currently known layers.
+ Layers() ([]Layer, error)
+
+ // Images returns a list of the currently known images.
+ Images() ([]Image, error)
+
+ // Containers returns a list of the currently known containers.
+ Containers() ([]Container, error)
+
+ // Names returns the list of names for a layer, image, or container.
+ Names(id string) ([]string, error)
+
+ // SetNames changes the list of names for a layer, image, or container.
+ // Duplicate names are removed from the list automatically.
+ SetNames(id string, names []string) error
+
+ // ListImageBigData retrieves a list of the (possibly large) chunks of
+ // named data associated with an image.
+ ListImageBigData(id string) ([]string, error)
+
+ // ImageBigData retrieves a (possibly large) chunk of named data
+ // associated with an image.
+ ImageBigData(id, key string) ([]byte, error)
+
+ // ImageBigDataSize retrieves the size of a (possibly large) chunk
+ // of named data associated with an image.
+ ImageBigDataSize(id, key string) (int64, error)
+
+ // ImageBigDataDigest retrieves the digest of a (possibly large) chunk
+ // of named data associated with an image.
+ ImageBigDataDigest(id, key string) (digest.Digest, error)
+
+ // SetImageBigData stores a (possibly large) chunk of named data associated
+ // with an image.
+ SetImageBigData(id, key string, data []byte) error
+
+ // ListContainerBigData retrieves a list of the (possibly large) chunks of
+ // named data associated with a container.
+ ListContainerBigData(id string) ([]string, error)
+
+ // ContainerBigData retrieves a (possibly large) chunk of named data
+ // associated with a container.
+ ContainerBigData(id, key string) ([]byte, error)
+
+ // ContainerBigDataSize retrieves the size of a (possibly large)
+ // chunk of named data associated with a container.
+ ContainerBigDataSize(id, key string) (int64, error)
+
+ // ContainerBigDataDigest retrieves the digest of a (possibly large)
+ // chunk of named data associated with a container.
+ ContainerBigDataDigest(id, key string) (digest.Digest, error)
+
+ // SetContainerBigData stores a (possibly large) chunk of named data
+ // associated with a container.
+ SetContainerBigData(id, key string, data []byte) error
+
+ // Layer returns a specific layer.
+ Layer(id string) (*Layer, error)
+
+ // Image returns a specific image.
+ Image(id string) (*Image, error)
+
+ // ImagesByTopLayer returns a list of images which reference the specified
+ // layer as their top layer. They will have different IDs and names
+ // and may have different metadata, big data items, and flags.
+ ImagesByTopLayer(id string) ([]*Image, error)
+
+ // Container returns a specific container.
+ Container(id string) (*Container, error)
+
+ // ContainerByLayer returns a specific container based on its layer ID or
+ // name.
+ ContainerByLayer(id string) (*Container, error)
+
+ // ContainerDirectory returns a path of a directory which the caller
+ // can use to store data, specific to the container, which the library
+ // does not directly manage. The directory will be deleted when the
+ // container is deleted.
+ ContainerDirectory(id string) (string, error)
+
+ // SetContainerDirectoryFile is a convenience function which stores
+ // a piece of data in the specified file relative to the container's
+ // directory.
+ SetContainerDirectoryFile(id, file string, data []byte) error
+
+ // FromContainerDirectory is a convenience function which reads
+ // the contents of the specified file relative to the container's
+ // directory.
+ FromContainerDirectory(id, file string) ([]byte, error)
+
+ // ContainerRunDirectory returns a path of a directory which the
+ // caller can use to store data, specific to the container, which the
+ // library does not directly manage. The directory will be deleted
+ // when the host system is restarted.
+ ContainerRunDirectory(id string) (string, error)
+
+ // SetContainerRunDirectoryFile is a convenience function which stores
+ // a piece of data in the specified file relative to the container's
+ // run directory.
+ SetContainerRunDirectoryFile(id, file string, data []byte) error
+
+ // FromContainerRunDirectory is a convenience function which reads
+ // the contents of the specified file relative to the container's run
+ // directory.
+ FromContainerRunDirectory(id, file string) ([]byte, error)
+
+ // Lookup returns the ID of a layer, image, or container with the specified
+ // name or ID.
+ Lookup(name string) (string, error)
+
+ // Shutdown attempts to free any kernel resources which are being used
+ // by the underlying driver. If "force" is true, any mounted (i.e., in
+ // use) layers are unmounted beforehand. If "force" is not true, any
+ // layers still in use are treated as an error condition. A list
+ // of still-mounted layers is returned along with possible errors.
+ Shutdown(force bool) (layers []string, err error)
+
+ // Version returns version information, in the form of key-value pairs, from
+ // the storage package.
+ Version() ([][2]string, error)
+}
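+
+// Example (editor's sketch, not part of the vendored file): the diff
+// round-trip that PutLayer wraps, exporting one layer's tarstream and
+// applying it to another. The name copyLayerDiff is hypothetical; passing
+// "" as the from layer lets Diff assume the source layer's parent.
+func copyLayerDiff(s Store, from, to string) (int64, error) {
+ diff, err := s.Diff("", from, nil)
+ if err != nil {
+ return -1, err
+ }
+ defer diff.Close()
+ return s.ApplyDiff(to, diff)
+}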
+
+// ImageOptions is used for passing options to a Store's CreateImage() method.
+type ImageOptions struct {
+ // CreationDate, if not zero, will override the default behavior of marking the image as having been
+ // created when CreateImage() was called, recording CreationDate instead.
+ CreationDate time.Time
+}
+
+// ContainerOptions is used for passing options to a Store's CreateContainer() method.
+type ContainerOptions struct {
+}
+
+type store struct {
+ lastLoaded time.Time
+ runRoot string
+ graphLock Locker
+ graphRoot string
+ graphDriverName string
+ graphOptions []string
+ uidMap []idtools.IDMap
+ gidMap []idtools.IDMap
+ graphDriver drivers.Driver
+ layerStore LayerStore
+ roLayerStores []ROLayerStore
+ imageStore ImageStore
+ roImageStores []ROImageStore
+ containerStore ContainerStore
+}
+
+// GetStore attempts to find an already-created Store object matching the
+// specified location and graph driver, and if it can't, it creates and
+// initializes a new Store object, and the underlying storage that it controls.
+func GetStore(options StoreOptions) (Store, error) {
+ if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
+ options = DefaultStoreOptions
+ }
+
+ if options.GraphRoot != "" {
+ options.GraphRoot = filepath.Clean(options.GraphRoot)
+ }
+ if options.RunRoot != "" {
+ options.RunRoot = filepath.Clean(options.RunRoot)
+ }
+
+ storesLock.Lock()
+ defer storesLock.Unlock()
+
+ for _, s := range stores {
+ if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+ return s, nil
+ }
+ }
+
+ if options.GraphRoot == "" {
+ return nil, ErrIncompleteOptions
+ }
+ if options.RunRoot == "" {
+ return nil, ErrIncompleteOptions
+ }
+
+ if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+ for _, subdir := range []string{} {
+ if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+ }
+ if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+ for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
+ if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+ }
+
+ graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock"))
+ if err != nil {
+ return nil, err
+ }
+ s := &store{
+ runRoot: options.RunRoot,
+ graphLock: graphLock,
+ graphRoot: options.GraphRoot,
+ graphDriverName: options.GraphDriverName,
+ graphOptions: options.GraphDriverOptions,
+ uidMap: copyIDMap(options.UIDMap),
+ gidMap: copyIDMap(options.GIDMap),
+ }
+ if err := s.load(); err != nil {
+ return nil, err
+ }
+
+ stores = append(stores, s)
+
+ return s, nil
+}
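+
+// Example (editor's sketch, not part of the vendored file): initializing a
+// store with explicit roots. The paths below are the conventional system
+// locations, not requirements; callers usually start from DefaultStoreOptions.
+func exampleGetStore() (Store, error) {
+ return GetStore(StoreOptions{
+ RunRoot: "/var/run/containers/storage",
+ GraphRoot: "/var/lib/containers/storage",
+ GraphDriverName: "overlay",
+ })
+}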
+
+func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap {
+ m := []idtools.IDMap{}
+ if idmap != nil {
+ m = make([]idtools.IDMap, len(idmap))
+ copy(m, idmap)
+ }
+ if len(m) > 0 {
+ return m[:]
+ }
+ return nil
+}
+
+func (s *store) RunRoot() string {
+ return s.runRoot
+}
+
+func (s *store) GraphDriverName() string {
+ return s.graphDriverName
+}
+
+func (s *store) GraphRoot() string {
+ return s.graphRoot
+}
+
+func (s *store) GraphOptions() []string {
+ return s.graphOptions
+}
+
+func (s *store) load() error {
+ driver, err := s.GraphDriver()
+ if err != nil {
+ return err
+ }
+ s.graphDriver = driver
+ s.graphDriverName = driver.String()
+ driverPrefix := s.graphDriverName + "-"
+
+ rls, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+ s.layerStore = rls
+ if _, err := s.ROLayerStores(); err != nil {
+ return err
+ }
+
+ gipath := filepath.Join(s.graphRoot, driverPrefix+"images")
+ if err := os.MkdirAll(gipath, 0700); err != nil {
+ return err
+ }
+ ris, err := newImageStore(gipath)
+ if err != nil {
+ return err
+ }
+ s.imageStore = ris
+ if _, err := s.ROImageStores(); err != nil {
+ return err
+ }
+
+ gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
+ if err := os.MkdirAll(gcpath, 0700); err != nil {
+ return err
+ }
+ rcs, err := newContainerStore(gcpath)
+ if err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
+ if err := os.MkdirAll(rcpath, 0700); err != nil {
+ return err
+ }
+ s.containerStore = rcs
+ return nil
+}
+
+func (s *store) getGraphDriver() (drivers.Driver, error) {
+ if s.graphDriver != nil {
+ return s.graphDriver, nil
+ }
+ config := drivers.Options{
+ Root: s.graphRoot,
+ DriverOptions: s.graphOptions,
+ UIDMaps: s.uidMap,
+ GIDMaps: s.gidMap,
+ }
+ driver, err := drivers.New(s.graphDriverName, config)
+ if err != nil {
+ return nil, err
+ }
+ s.graphDriver = driver
+ s.graphDriverName = driver.String()
+ return driver, nil
+}
+
+func (s *store) GraphDriver() (drivers.Driver, error) {
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
+ if s.graphLock.TouchedSince(s.lastLoaded) {
+ s.graphDriver = nil
+ s.layerStore = nil
+ s.lastLoaded = time.Now()
+ }
+ return s.getGraphDriver()
+}
+
+// LayerStore obtains and returns a handle to the writeable layer store object
+// used by the Store. Accessing this store directly will bypass locking and
+// synchronization, so it is not a part of the exported Store interface.
+func (s *store) LayerStore() (LayerStore, error) {
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
+ if s.graphLock.TouchedSince(s.lastLoaded) {
+ s.graphDriver = nil
+ s.layerStore = nil
+ s.lastLoaded = time.Now()
+ }
+ if s.layerStore != nil {
+ return s.layerStore, nil
+ }
+ driver, err := s.getGraphDriver()
+ if err != nil {
+ return nil, err
+ }
+ driverPrefix := s.graphDriverName + "-"
+ rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
+ if err := os.MkdirAll(rlpath, 0700); err != nil {
+ return nil, err
+ }
+ glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
+ if err := os.MkdirAll(glpath, 0700); err != nil {
+ return nil, err
+ }
+ rls, err := newLayerStore(rlpath, glpath, driver)
+ if err != nil {
+ return nil, err
+ }
+ s.layerStore = rls
+ return s.layerStore, nil
+}
+
+// ROLayerStores obtains additional read/only layer store objects used by the
+// Store. Accessing these stores directly will bypass locking and
+// synchronization, so it is not part of the exported Store interface.
+func (s *store) ROLayerStores() ([]ROLayerStore, error) {
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
+ if s.roLayerStores != nil {
+ return s.roLayerStores, nil
+ }
+ driver, err := s.getGraphDriver()
+ if err != nil {
+ return nil, err
+ }
+ driverPrefix := s.graphDriverName + "-"
+ rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
+ if err := os.MkdirAll(rlpath, 0700); err != nil {
+ return nil, err
+ }
+ for _, store := range driver.AdditionalImageStores() {
+ glpath := filepath.Join(store, driverPrefix+"layers")
+ rls, err := newROLayerStore(rlpath, glpath, driver)
+ if err != nil {
+ return nil, err
+ }
+ s.roLayerStores = append(s.roLayerStores, rls)
+ }
+ return s.roLayerStores, nil
+}
+
+// ImageStore obtains and returns a handle to the writable image store object
+// used by the Store. Accessing this store directly will bypass locking and
+// synchronization, so it is not a part of the exported Store interface.
+func (s *store) ImageStore() (ImageStore, error) {
+ if s.imageStore != nil {
+ return s.imageStore, nil
+ }
+ return nil, ErrLoadError
+}
+
+// ROImageStores obtains additional read/only image store objects used by the
+// Store. Accessing these stores directly will bypass locking and
+// synchronization, so it is not a part of the exported Store interface.
+func (s *store) ROImageStores() ([]ROImageStore, error) {
+ if len(s.roImageStores) != 0 {
+ return s.roImageStores, nil
+ }
+ driver, err := s.getGraphDriver()
+ if err != nil {
+ return nil, err
+ }
+ driverPrefix := s.graphDriverName + "-"
+ for _, store := range driver.AdditionalImageStores() {
+ gipath := filepath.Join(store, driverPrefix+"images")
+ ris, err := newROImageStore(gipath)
+ if err != nil {
+ return nil, err
+ }
+ s.roImageStores = append(s.roImageStores, ris)
+ }
+ return s.roImageStores, nil
+}
+
+// ContainerStore obtains and returns a handle to the container store object
+// used by the Store. Accessing this store directly will bypass locking and
+// synchronization, so it is not a part of the exported Store interface.
+func (s *store) ContainerStore() (ContainerStore, error) {
+ if s.containerStore != nil {
+ return s.containerStore, nil
+ }
+ return nil, ErrLoadError
+}
+
+func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return nil, -1, err
+ }
+ rlstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, -1, err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, -1, err
+ }
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ if id == "" {
+ id = stringid.GenerateRandomID()
+ }
+ if parent != "" {
+ var ilayer *Layer
+ for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) {
+ if l, err := lstore.Get(parent); err == nil && l != nil {
+ ilayer = l
+ parent = ilayer.ID
+ break
+ }
+ }
+ if ilayer == nil {
+ return nil, -1, ErrLayerUnknown
+ }
+ containers, err := rcstore.Containers()
+ if err != nil {
+ return nil, -1, err
+ }
+ for _, container := range containers {
+ if container.LayerID == parent {
+ return nil, -1, ErrParentIsContainer
+ }
+ }
+ }
+ return rlstore.Put(id, parent, names, mountLabel, nil, writeable, nil, diff)
+}
+
+func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) {
+ layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, nil)
+ return layer, err
+}
+
+func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
+ if id == "" {
+ id = stringid.GenerateRandomID()
+ }
+
+ if layer != "" {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, err
+ }
+ var ilayer *Layer
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ ilayer, err = store.Get(layer)
+ if err == nil {
+ break
+ }
+ }
+ if ilayer == nil {
+ return nil, ErrLayerUnknown
+ }
+ layer = ilayer.ID
+ }
+
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+
+ creationDate := time.Now().UTC()
+ if options != nil && !options.CreationDate.IsZero() {
+ creationDate = options.CreationDate
+ }
+
+ return ristore.Create(id, names, layer, metadata, creationDate)
+}
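+
+// Example (editor's sketch, not part of the vendored file): recording an
+// image whose creation date comes from image metadata rather than the wall
+// clock, via ImageOptions.CreationDate. The name importImageRecord and the
+// image name used here are hypothetical.
+func importImageRecord(s Store, layerID string, created time.Time) (*Image, error) {
+ return s.CreateImage("", []string{"imported/image:latest"}, layerID, "", &ImageOptions{CreationDate: created})
+}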
+
+func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ if id == "" {
+ id = stringid.GenerateRandomID()
+ }
+
+ imageTopLayer := ""
+ imageID := ""
+ if image != "" {
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ var cimage *Image
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ cimage, err = store.Get(image)
+ if err == nil {
+ break
+ }
+ }
+ if cimage == nil {
+ return nil, ErrImageUnknown
+ }
+ imageTopLayer = cimage.TopLayer
+ imageID = cimage.ID
+ }
+ clayer, err := rlstore.Create(layer, imageTopLayer, nil, "", nil, true)
+ if err != nil {
+ return nil, err
+ }
+ layer = clayer.ID
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ container, err := rcstore.Create(id, names, imageID, layer, metadata)
+ if err != nil || container == nil {
+ rlstore.Delete(layer)
+ }
+ return container, err
+}
+
+func (s *store) SetMetadata(id, metadata string) error {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return err
+ }
+
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ if rlstore.Exists(id) {
+ return rlstore.SetMetadata(id, metadata)
+ }
+ if ristore.Exists(id) {
+ return ristore.SetMetadata(id, metadata)
+ }
+ if rcstore.Exists(id) {
+ return rcstore.SetMetadata(id, metadata)
+ }
+ return ErrNotAnID
+}
+
+func (s *store) Metadata(id string) (string, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return "", err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return "", err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(id) {
+ return store.Metadata(id)
+ }
+ }
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return "", err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return "", err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(id) {
+ return store.Metadata(id)
+ }
+ }
+
+ cstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+ cstore.Lock()
+ defer cstore.Unlock()
+ if modified, err := cstore.Modified(); modified || err != nil {
+ cstore.Load()
+ }
+ if cstore.Exists(id) {
+ return cstore.Metadata(id)
+ }
+ return "", ErrNotAnID
+}
+
+func (s *store) ListImageBigData(id string) ([]string, error) {
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ bigDataNames, err := store.BigDataNames(id)
+ if err == nil {
+ return bigDataNames, err
+ }
+ }
+ return nil, ErrImageUnknown
+}
+
+func (s *store) ImageBigDataSize(id, key string) (int64, error) {
+ istore, err := s.ImageStore()
+ if err != nil {
+ return -1, err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return -1, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ size, err := store.BigDataSize(id, key)
+ if err == nil {
+ return size, nil
+ }
+ }
+ return -1, ErrSizeUnknown
+}
+
+func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return "", err
+ }
+ stores, err := s.ROImageStores()
+ if err != nil {
+ return "", err
+ }
+ stores = append([]ROImageStore{ristore}, stores...)
+ for _, ristore := range stores {
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ d, err := ristore.BigDataDigest(id, key)
+ if err == nil && d.Validate() == nil {
+ return d, nil
+ }
+ }
+ return "", ErrDigestUnknown
+}
+
+func (s *store) ImageBigData(id, key string) ([]byte, error) {
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ data, err := store.BigData(id, key)
+ if err == nil {
+ return data, nil
+ }
+ }
+ return nil, ErrImageUnknown
+}
+
+func (s *store) SetImageBigData(id, key string, data []byte) error {
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return err
+ }
+
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+
+ return ristore.SetBigData(id, key, data)
+}
+
+func (s *store) ListContainerBigData(id string) ([]string, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ return rcstore.BigDataNames(id)
+}
+
+func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return -1, err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ return rcstore.BigDataSize(id, key)
+}
+
+func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ return rcstore.BigDataDigest(id, key)
+}
+
+func (s *store) ContainerBigData(id, key string) ([]byte, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ return rcstore.BigData(id, key)
+}
+
+func (s *store) SetContainerBigData(id, key string, data []byte) error {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ return rcstore.SetBigData(id, key, data)
+}
+
+func (s *store) Exists(id string) bool {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return false
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return false
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(id) {
+ return true
+ }
+ }
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return false
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return false
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(id) {
+ return true
+ }
+ }
+
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return false
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ if rcstore.Exists(id) {
+ return true
+ }
+
+ return false
+}
+
+func dedupeNames(names []string) []string {
+ seen := make(map[string]bool)
+ deduped := make([]string, 0, len(names))
+ for _, name := range names {
+ if _, wasSeen := seen[name]; !wasSeen {
+ seen[name] = true
+ deduped = append(deduped, name)
+ }
+ }
+ return deduped
+}
+
+func (s *store) SetNames(id string, names []string) error {
+ deduped := dedupeNames(names)
+
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ if rlstore.Exists(id) {
+ return rlstore.SetNames(id, deduped)
+ }
+
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return err
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ if ristore.Exists(id) {
+ return ristore.SetNames(id, deduped)
+ }
+
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ if rcstore.Exists(id) {
+ return rcstore.SetNames(id, deduped)
+ }
+ return ErrLayerUnknown
+}
+
+func (s *store) Names(id string) ([]string, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if l, err := store.Get(id); l != nil && err == nil {
+ return l.Names, nil
+ }
+ }
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if i, err := store.Get(id); i != nil && err == nil {
+ return i.Names, nil
+ }
+ }
+
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ if c, err := rcstore.Get(id); c != nil && err == nil {
+ return c.Names, nil
+ }
+ return nil, ErrLayerUnknown
+}
+
+func (s *store) Lookup(name string) (string, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return "", err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return "", err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if l, err := store.Get(name); l != nil && err == nil {
+ return l.ID, nil
+ }
+ }
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return "", err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return "", err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if i, err := store.Get(name); i != nil && err == nil {
+ return i.ID, nil
+ }
+ }
+
+ cstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+ cstore.Lock()
+ defer cstore.Unlock()
+ if modified, err := cstore.Modified(); modified || err != nil {
+ cstore.Load()
+ }
+ if c, err := cstore.Get(name); c != nil && err == nil {
+ return c.ID, nil
+ }
+
+ return "", ErrLayerUnknown
+}
+
+func (s *store) DeleteLayer(id string) error {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return err
+ }
+
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ if rlstore.Exists(id) {
+ if l, err := rlstore.Get(id); err == nil {
+ id = l.ID
+ }
+ layers, err := rlstore.Layers()
+ if err != nil {
+ return err
+ }
+ for _, layer := range layers {
+ if layer.Parent == id {
+ return ErrLayerHasChildren
+ }
+ }
+ images, err := ristore.Images()
+ if err != nil {
+ return err
+ }
+ for _, image := range images {
+ if image.TopLayer == id {
+ return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID)
+ }
+ }
+ containers, err := rcstore.Containers()
+ if err != nil {
+ return err
+ }
+ for _, container := range containers {
+ if container.LayerID == id {
+ return errors.Wrapf(ErrLayerUsedByContainer, "Layer %v used by container %v", id, container.ID)
+ }
+ }
+ return rlstore.Delete(id)
+ }
+ return ErrNotALayer
+}
+
+func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ layersToRemove := []string{}
+ if ristore.Exists(id) {
+ image, err := ristore.Get(id)
+ if err != nil {
+ return nil, err
+ }
+ id = image.ID
+ containers, err := rcstore.Containers()
+ if err != nil {
+ return nil, err
+ }
+ aContainerByImage := make(map[string]string)
+ for _, container := range containers {
+ aContainerByImage[container.ImageID] = container.ID
+ }
+ if container, ok := aContainerByImage[id]; ok {
+ return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container)
+ }
+ images, err := ristore.Images()
+ if err != nil {
+ return nil, err
+ }
+ layers, err := rlstore.Layers()
+ if err != nil {
+ return nil, err
+ }
+ childrenByParent := make(map[string]*[]string)
+ for _, layer := range layers {
+ parent := layer.Parent
+ if list, ok := childrenByParent[parent]; ok {
+ newList := append(*list, layer.ID)
+ childrenByParent[parent] = &newList
+ } else {
+ childrenByParent[parent] = &([]string{layer.ID})
+ }
+ }
+ anyImageByTopLayer := make(map[string]string)
+ for _, img := range images {
+ if img.ID != id {
+ anyImageByTopLayer[img.TopLayer] = img.ID
+ }
+ }
+ if commit {
+ if err = ristore.Delete(id); err != nil {
+ return nil, err
+ }
+ }
+ layer := image.TopLayer
+ lastRemoved := ""
+ for layer != "" {
+ if rcstore.Exists(layer) {
+ break
+ }
+ if _, ok := anyImageByTopLayer[layer]; ok {
+ break
+ }
+ parent := ""
+ if l, err := rlstore.Get(layer); err == nil {
+ parent = l.Parent
+ }
+ otherRefs := 0
+ if childList, ok := childrenByParent[layer]; ok && childList != nil {
+ children := *childList
+ for _, child := range children {
+ if child != lastRemoved {
+ otherRefs++
+ }
+ }
+ }
+ if otherRefs != 0 {
+ break
+ }
+ lastRemoved = layer
+ layersToRemove = append(layersToRemove, lastRemoved)
+ layer = parent
+ }
+ } else {
+ return nil, ErrNotAnImage
+ }
+ if commit {
+ for _, layer := range layersToRemove {
+ if err = rlstore.Delete(layer); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return layersToRemove, nil
+}
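+
+// Example (editor's sketch, not part of the vendored file): a dry run of
+// image deletion. With commit=false nothing is removed, but the layers that
+// would be removed are still returned, per the DeleteImage contract above.
+// The name wouldRemoveLayers is hypothetical.
+func wouldRemoveLayers(s Store, imageID string) ([]string, error) {
+ return s.DeleteImage(imageID, false)
+}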
+
+func (s *store) DeleteContainer(id string) error {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return err
+ }
+
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ if rcstore.Exists(id) {
+ if container, err := rcstore.Get(id); err == nil {
+ if rlstore.Exists(container.LayerID) {
+ if err = rlstore.Delete(container.LayerID); err != nil {
+ return err
+ }
+ if err = rcstore.Delete(id); err != nil {
+ return err
+ }
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(gcpath); err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(rcpath); err != nil {
+ return err
+ }
+ return nil
+ }
+ return ErrNotALayer
+ }
+ }
+ return ErrNotAContainer
+}
+
+func (s *store) Delete(id string) error {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return err
+ }
+
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ if rcstore.Exists(id) {
+ if container, err := rcstore.Get(id); err == nil {
+ if rlstore.Exists(container.LayerID) {
+ if err = rlstore.Delete(container.LayerID); err != nil {
+ return err
+ }
+ if err = rcstore.Delete(id); err != nil {
+ return err
+ }
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata")
+ if err = os.RemoveAll(gcpath); err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata")
+ if err = os.RemoveAll(rcpath); err != nil {
+ return err
+ }
+ return nil
+ }
+ return ErrNotALayer
+ }
+ }
+ if ristore.Exists(id) {
+ return ristore.Delete(id)
+ }
+ if rlstore.Exists(id) {
+ return rlstore.Delete(id)
+ }
+ return ErrLayerUnknown
+}
+
+func (s *store) Wipe() error {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return err
+ }
+ ristore, err := s.ImageStore()
+ if err != nil {
+ return err
+ }
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ ristore.Lock()
+ defer ristore.Unlock()
+ if modified, err := ristore.Modified(); modified || err != nil {
+ ristore.Load()
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ if err = rcstore.Wipe(); err != nil {
+ return err
+ }
+ if err = ristore.Wipe(); err != nil {
+ return err
+ }
+ return rlstore.Wipe()
+}
+
+func (s *store) Status() ([][2]string, error) {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ return rlstore.Status()
+}
+
+func (s *store) Version() ([][2]string, error) {
+ return [][2]string{}, nil
+}
+
+func (s *store) Mount(id, mountLabel string) (string, error) {
+ if layerID, err := s.ContainerLayerID(id); err == nil {
+ id = layerID
+ }
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return "", err
+ }
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ if rlstore.Exists(id) {
+ return rlstore.Mount(id, mountLabel)
+ }
+ return "", ErrLayerUnknown
+}
+
+func (s *store) Unmount(id string) error {
+ if layerID, err := s.ContainerLayerID(id); err == nil {
+ id = layerID
+ }
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return err
+ }
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ if rlstore.Exists(id) {
+ return rlstore.Unmount(id)
+ }
+ return ErrLayerUnknown
+}
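
A minimal usage sketch of the Mount/Unmount pair above, which resolves a container ID to its layer before delegating to the layer store. It assumes a Store created via the package's GetStore with DefaultStoreOptions, and an existing container named "mycontainer"; both are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage"
)

func main() {
	// Assumptions: the default store options are usable on this host and
	// "mycontainer" names an existing container; both are illustrative.
	s, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		log.Fatal(err)
	}
	// Mount accepts a container ID and resolves it to the container's layer.
	mountPoint, err := s.Mount("mycontainer", "")
	if err != nil {
		log.Fatal(err)
	}
	defer s.Unmount("mycontainer")
	fmt.Println("filesystem mounted at", mountPoint)
}
```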
+
+func (s *store) Changes(from, to string) ([]archive.Change, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(to) {
+ return store.Changes(from, to)
+ }
+ }
+ return nil, ErrLayerUnknown
+}
+
+func (s *store) DiffSize(from, to string) (int64, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return -1, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return -1, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(to) {
+ return store.DiffSize(from, to)
+ }
+ }
+ return -1, ErrLayerUnknown
+}
+
+func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(to) {
+ return store.Diff(from, to, options)
+ }
+ }
+ return nil, ErrLayerUnknown
+}
+
+func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return -1, err
+ }
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+ if rlstore.Exists(to) {
+ return rlstore.ApplyDiff(to, diff)
+ }
+ return -1, ErrLayerUnknown
+}
+
+func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) {
+ var layers []Layer
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ storeLayers, err := m(store, d)
+ if err != nil {
+ if errors.Cause(err) != ErrLayerUnknown {
+ return nil, err
+ }
+ continue
+ }
+ layers = append(layers, storeLayers...)
+ }
+ if len(layers) == 0 {
+ return nil, ErrLayerUnknown
+ }
+ return layers, nil
+}
+
+func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
+ if err := d.Validate(); err != nil {
+ return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
+ }
+ return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
+}
+
+func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
+ if err := d.Validate(); err != nil {
+ return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
+ }
+ return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
+}
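
The two lookup helpers above differ only in which digest index they consult. A sketch of looking up layers by compressed digest, again assuming a Store created via GetStore; the digest value is a placeholder (sha256 of the empty string), not a real layer blob:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	s, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder digest, not a real layer blob digest.
	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	layers, err := s.LayersByCompressedDigest(d)
	if err != nil {
		log.Fatal(err) // ErrLayerUnknown if nothing matches
	}
	for _, layer := range layers {
		fmt.Println(layer.ID)
	}
}
```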
+
+func (s *store) LayerSize(id string) (int64, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return -1, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return -1, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if store.Exists(id) {
+ return store.Size(id)
+ }
+ }
+ return -1, ErrLayerUnknown
+}
+
+func (s *store) Layers() ([]Layer, error) {
+ var layers []Layer
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ storeLayers, err := store.Layers()
+ if err != nil {
+ return nil, err
+ }
+ layers = append(layers, storeLayers...)
+ }
+ return layers, nil
+}
+
+func (s *store) Images() ([]Image, error) {
+ var images []Image
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ storeImages, err := store.Images()
+ if err != nil {
+ return nil, err
+ }
+ images = append(images, storeImages...)
+ }
+ return images, nil
+}
+
+func (s *store) Containers() ([]Container, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ return rcstore.Containers()
+}
+
+func (s *store) Layer(id string) (*Layer, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return nil, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ layer, err := store.Get(id)
+ if err == nil {
+ return layer, nil
+ }
+ }
+ return nil, ErrLayerUnknown
+}
+
+func (s *store) Image(id string) (*Image, error) {
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ image, err := store.Get(id)
+ if err == nil {
+ return image, nil
+ }
+ }
+ return nil, ErrImageUnknown
+}
+
+func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
+ images := []*Image{}
+ layer, err := s.Layer(id)
+ if err != nil {
+ return nil, err
+ }
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ imageList, err := store.Images()
+ if err != nil {
+ return nil, err
+ }
+ for _, image := range imageList {
+ if image.TopLayer == layer.ID {
+ images = append(images, &image)
+ }
+ }
+ }
+ return images, nil
+}
+
+func (s *store) Container(id string) (*Container, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ return rcstore.Get(id)
+}
+
+func (s *store) ContainerLayerID(id string) (string, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ container, err := rcstore.Get(id)
+ if err != nil {
+ return "", err
+ }
+ return container.LayerID, nil
+}
+
+func (s *store) ContainerByLayer(id string) (*Container, error) {
+ layer, err := s.Layer(id)
+ if err != nil {
+ return nil, err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+ containerList, err := rcstore.Containers()
+ if err != nil {
+ return nil, err
+ }
+ for _, container := range containerList {
+ if container.LayerID == layer.ID {
+ return &container, nil
+ }
+ }
+
+ return nil, ErrContainerUnknown
+}
+
+func (s *store) ContainerDirectory(id string) (string, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ id, err = rcstore.Lookup(id)
+ if err != nil {
+ return "", err
+ }
+
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(gcpath, 0700); err != nil {
+ return "", err
+ }
+ return gcpath, nil
+}
+
+func (s *store) ContainerRunDirectory(id string) (string, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ id, err = rcstore.Lookup(id)
+ if err != nil {
+ return "", err
+ }
+
+ middleDir := s.graphDriverName + "-containers"
+ rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(rcpath, 0700); err != nil {
+ return "", err
+ }
+ return rcpath, nil
+}
+
+func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error {
+ dir, err := s.ContainerDirectory(id)
+ if err != nil {
+ return err
+ }
+ err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+ if err != nil {
+ return err
+ }
+ return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+}
+
+func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
+ dir, err := s.ContainerDirectory(id)
+ if err != nil {
+ return nil, err
+ }
+ return ioutil.ReadFile(filepath.Join(dir, file))
+}
+
+func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error {
+ dir, err := s.ContainerRunDirectory(id)
+ if err != nil {
+ return err
+ }
+ err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+ if err != nil {
+ return err
+ }
+ return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+}
+
+func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
+ dir, err := s.ContainerRunDirectory(id)
+ if err != nil {
+ return nil, err
+ }
+ return ioutil.ReadFile(filepath.Join(dir, file))
+}
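
A sketch of the per-container metadata helpers above: write a file under the container's persistent directory and read it back. The store setup and container name are illustrative assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage"
)

func main() {
	s, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		log.Fatal(err)
	}
	// "mycontainer" is assumed to exist; the file is written atomically
	// under <graphroot>/<driver>-containers/<id>/userdata/.
	if err := s.SetContainerDirectoryFile("mycontainer", "hooks.json", []byte(`{}`)); err != nil {
		log.Fatal(err)
	}
	data, err := s.FromContainerDirectory("mycontainer", "hooks.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read back: %s\n", data)
}
```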
+
+func (s *store) Shutdown(force bool) ([]string, error) {
+ mounted := []string{}
+ modified := false
+
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return mounted, err
+ }
+
+ rlstore.Lock()
+ defer rlstore.Unlock()
+ if modified, err := rlstore.Modified(); modified || err != nil {
+ rlstore.Load()
+ }
+
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
+ layers, err := rlstore.Layers()
+ if err != nil {
+ return mounted, err
+ }
+ for _, layer := range layers {
+ if layer.MountCount == 0 {
+ continue
+ }
+ mounted = append(mounted, layer.ID)
+ if force {
+ for layer.MountCount > 0 {
+ err2 := rlstore.Unmount(layer.ID)
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ break
+ }
+ modified = true
+ }
+ }
+ }
+ if len(mounted) > 0 && err == nil {
+ err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted")
+ }
+ if err == nil {
+ err = s.graphDriver.Cleanup()
+ s.graphLock.Touch()
+ modified = true
+ }
+ if modified {
+ rlstore.Touch()
+ }
+ return mounted, err
+}
+
+// makeBigDataBaseName converts a BigData key name into an acceptable file name.
+func makeBigDataBaseName(key string) string {
+ reader := strings.NewReader(key)
+ for reader.Len() > 0 {
+ ch, size, err := reader.ReadRune()
+ if err != nil || size != 1 {
+ break
+ }
+ if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
+ break
+ }
+ }
+ if reader.Len() > 0 {
+ return "=" + base64.StdEncoding.EncodeToString([]byte(key))
+ }
+ return key
+}
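
Since makeBigDataBaseName is unexported, here is a byte-oriented re-implementation of the same rule for illustration: keys consisting only of '.', '0'-'9', and 'a'-'z' are used verbatim, and anything else falls back to "=" plus the base64 encoding of the whole key:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// bigDataBaseName mirrors the sanitizing rule above (illustrative copy).
func bigDataBaseName(key string) string {
	for i := 0; i < len(key); i++ {
		ch := key[i]
		if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
			return "=" + base64.StdEncoding.EncodeToString([]byte(key))
		}
	}
	return key
}

func main() {
	fmt.Println(bigDataBaseName("manifest"))   // manifest
	fmt.Println(bigDataBaseName("sha256:abc")) // =c2hhMjU2OmFiYw==
}
```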
+
+func stringSliceWithoutValue(slice []string, value string) []string {
+ modified := make([]string, 0, len(slice))
+ for _, v := range slice {
+ if v == value {
+ continue
+ }
+ modified = append(modified, v)
+ }
+ return modified
+}
+
+const configFile = "/etc/containers/storage.conf"
+
+// OptionsConfig represents the "storage.options" TOML config table.
+type OptionsConfig struct {
+	// AdditionalImageStores is the location of additional read-only
+	// image stores, usually used to access networked file systems for
+	// shared image content.
+ AdditionalImageStores []string `toml:"additionalimagestores"`
+
+	// Size sets a maximum size quota for container images (driver-specific).
+ Size string `toml:"size"`
+
+	// OverrideKernelCheck tells the driver to skip its minimum kernel version check (driver-specific).
+ OverrideKernelCheck string `toml:"override_kernel_check"`
+}
+
+// TOML-friendly explicit tables used for conversions.
+type tomlConfig struct {
+ Storage struct {
+ Driver string `toml:"driver"`
+ RunRoot string `toml:"runroot"`
+ GraphRoot string `toml:"graphroot"`
+ Options struct{ OptionsConfig } `toml:"options"`
+ } `toml:"storage"`
+}
+
+func init() {
+ DefaultStoreOptions.RunRoot = "/var/run/containers/storage"
+ DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
+ DefaultStoreOptions.GraphDriverName = ""
+
+ data, err := ioutil.ReadFile(configFile)
+ if err != nil {
+ if !os.IsNotExist(err) {
+			fmt.Printf("Failed to read %s: %v\n", configFile, err)
+ return
+ }
+ }
+
+ config := new(tomlConfig)
+
+ if _, err := toml.Decode(string(data), config); err != nil {
+		fmt.Printf("Failed to parse %s: %v\n", configFile, err)
+ return
+ }
+ if config.Storage.Driver != "" {
+ DefaultStoreOptions.GraphDriverName = config.Storage.Driver
+ }
+ if config.Storage.RunRoot != "" {
+ DefaultStoreOptions.RunRoot = config.Storage.RunRoot
+ }
+ if config.Storage.GraphRoot != "" {
+ DefaultStoreOptions.GraphRoot = config.Storage.GraphRoot
+ }
+ for _, s := range config.Storage.Options.AdditionalImageStores {
+ DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
+ }
+ if config.Storage.Options.Size != "" {
+ DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
+ }
+ if config.Storage.Options.OverrideKernelCheck != "" {
+ DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck))
+ }
+ if os.Getenv("STORAGE_DRIVER") != "" {
+ DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
+ }
+ if os.Getenv("STORAGE_OPTS") != "" {
+ DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
+ }
+ if len(DefaultStoreOptions.GraphDriverOptions) == 1 && DefaultStoreOptions.GraphDriverOptions[0] == "" {
+ DefaultStoreOptions.GraphDriverOptions = nil
+ }
+}
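
To make the table layout concrete, here is a self-contained sketch that decodes an illustrative storage.conf with the same BurntSushi/toml library, using a local mirror of the unexported tomlConfig type; all values are made up. Note that the STORAGE_DRIVER and STORAGE_OPTS environment variables, checked last in init, override whatever the file provides.

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Local, flattened mirror of the tomlConfig type above, for illustration.
type config struct {
	Storage struct {
		Driver    string `toml:"driver"`
		RunRoot   string `toml:"runroot"`
		GraphRoot string `toml:"graphroot"`
		Options   struct {
			AdditionalImageStores []string `toml:"additionalimagestores"`
			Size                  string   `toml:"size"`
			OverrideKernelCheck   string   `toml:"override_kernel_check"`
		} `toml:"options"`
	} `toml:"storage"`
}

func main() {
	// Illustrative storage.conf contents exercising every field read above.
	const sample = `
[storage]
driver = "overlay"
runroot = "/var/run/containers/storage"
graphroot = "/var/lib/containers/storage"

[storage.options]
additionalimagestores = [ "/mnt/shared/containers/storage" ]
size = "20G"
override_kernel_check = "true"
`
	var c config
	if _, err := toml.Decode(sample, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Storage.Driver, c.Storage.Options.AdditionalImageStores)
}
```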
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
new file mode 100644
index 000000000..9958101e2
--- /dev/null
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -0,0 +1,21 @@
+github.com/BurntSushi/toml master
+github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
+github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
+github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
+github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
+github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
+github.com/opencontainers/go-digest master
+github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
+github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
+github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
+github.com/pkg/errors master
+github.com/pmezard/go-difflib v1.0.0
+github.com/sirupsen/logrus v1.0.0
+github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
+github.com/tchap/go-patricia v2.2.6
+github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
+golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
+github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 000000000..37ec93a14
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md
new file mode 100644
index 000000000..cb87a1124
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/README.md
@@ -0,0 +1,54 @@
+# go-systemd
+
+[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd)
+[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd)
+
+Go bindings to systemd. The project has several packages:
+
+- `activation` - for writing and using socket activation from Go
+- `dbus` - for starting/stopping/inspecting running services and units
+- `journal` - for writing to systemd's logging service, journald
+- `sdjournal` - for reading from journald by wrapping its C API
+- `machine1` - for registering machines/containers with systemd
+- `unit` - for (de)serialization and comparison of unit files
+
+## Socket Activation
+
+An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd:
+
+https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver
+
+## Journal
+
+Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry.
+The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available.
+
+## D-Bus
+
+The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here:
+
+http://godoc.org/github.com/coreos/go-systemd/dbus
+
+### Debugging
+
+Create `/etc/dbus-1/system-local.conf` that looks like this:
+
+```
+<!DOCTYPE busconfig PUBLIC
+"-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
+"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+ <policy user="root">
+ <allow eavesdrop="true"/>
+ <allow eavesdrop="true" send_destination="*"/>
+ </policy>
+</busconfig>
+```
+
+## machined
+
+The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/).
+
+## Units
+
+The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html).
diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
new file mode 100644
index 000000000..ba6d41d85
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
@@ -0,0 +1,63 @@
+// Copyright 2014 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Code forked from Docker project
+package daemon
+
+import (
+ "net"
+ "os"
+)
+
+// SdNotify sends a message to the init daemon. It is common to ignore the error.
+// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
+// will be unconditionally unset.
+//
+// It returns one of the following:
+// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
+// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
+// (true, nil) - notification supported, data has been sent
+func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) {
+ socketAddr := &net.UnixAddr{
+ Name: os.Getenv("NOTIFY_SOCKET"),
+ Net: "unixgram",
+ }
+
+ // NOTIFY_SOCKET not set
+ if socketAddr.Name == "" {
+ return false, nil
+ }
+
+ if unsetEnvironment {
+ err = os.Unsetenv("NOTIFY_SOCKET")
+ }
+ if err != nil {
+ return false, err
+ }
+
+ conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
+ // Error connecting to NOTIFY_SOCKET
+ if err != nil {
+ return false, err
+ }
+ defer conn.Close()
+
+ _, err = conn.Write([]byte(state))
+ // Error sending the message
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
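
A sketch of the usual READY=1 handshake for a Type=notify service, using only the SdNotify function defined above; the port and handler are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/go-systemd/daemon"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})
	// Tell systemd we are ready; harmless when not run under systemd.
	if sent, err := daemon.SdNotify(false, "READY=1"); err != nil {
		log.Printf("notification failed: %v", err)
	} else if !sent {
		log.Print("NOTIFY_SOCKET unset; not started by systemd")
	}
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```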
diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
new file mode 100644
index 000000000..35a92e6e6
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
@@ -0,0 +1,72 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package daemon
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "time"
+)
+
+// SdWatchdogEnabled returns watchdog information for a service.
+// The process should send daemon.SdNotify("WATCHDOG=1") every time/2.
+// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC`
+// and `WATCHDOG_PID` will be unconditionally unset.
+//
+// It returns one of the following:
+// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
+// (0, err) - an error happened (e.g. error converting time).
+// (time, nil) - watchdog is enabled and we can send ping.
+// time is the delay before an inactive service will be killed.
+func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
+ wusec := os.Getenv("WATCHDOG_USEC")
+ wpid := os.Getenv("WATCHDOG_PID")
+ if unsetEnvironment {
+ wusecErr := os.Unsetenv("WATCHDOG_USEC")
+ wpidErr := os.Unsetenv("WATCHDOG_PID")
+ if wusecErr != nil {
+ return 0, wusecErr
+ }
+ if wpidErr != nil {
+ return 0, wpidErr
+ }
+ }
+
+ if wusec == "" {
+ return 0, nil
+ }
+ s, err := strconv.Atoi(wusec)
+ if err != nil {
+ return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
+ }
+ if s <= 0 {
+		return 0, fmt.Errorf("WATCHDOG_USEC must be a positive number")
+ }
+ interval := time.Duration(s) * time.Microsecond
+
+ if wpid == "" {
+ return interval, nil
+ }
+ p, err := strconv.Atoi(wpid)
+ if err != nil {
+ return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
+ }
+ if os.Getpid() != p {
+ return 0, nil
+ }
+
+ return interval, nil
+}
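
A sketch of the recommended pairing with SdNotify: query the interval once, then ping at half that interval, per the comment above. It assumes WatchdogSec= is configured in the service's unit file:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/go-systemd/daemon"
)

func main() {
	interval, err := daemon.SdWatchdogEnabled(false)
	if err != nil {
		log.Fatal(err)
	}
	if interval == 0 {
		log.Print("watchdog not enabled for this process")
		return
	}
	// Ping at half the timeout so a single missed tick does not kill us.
	for range time.Tick(interval / 2) {
		daemon.SdNotify(false, "WATCHDOG=1")
	}
}
```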
diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
new file mode 100644
index 000000000..c1694fb52
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
@@ -0,0 +1,213 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
+ num = `0123456789`
+ alphanum = alpha + num
+ signalBuffer = 100
+)
+
+// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
+func needsEscape(i int, b byte) bool {
+ // Escape everything that is not a-z-A-Z-0-9
+ // Also escape 0-9 if it's the first character
+ return strings.IndexByte(alphanum, b) == -1 ||
+ (i == 0 && strings.IndexByte(num, b) != -1)
+}
+
+// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
+// rules that systemd uses for serializing special characters.
+func PathBusEscape(path string) string {
+ // Special case the empty string
+ if len(path) == 0 {
+ return "_"
+ }
+ n := []byte{}
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if needsEscape(i, c) {
+ e := fmt.Sprintf("_%x", c)
+ n = append(n, []byte(e)...)
+ } else {
+ n = append(n, c)
+ }
+ }
+ return string(n)
+}
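
A few illustrative inputs and the escaped forms produced by the rules above; each escaped byte becomes '_' followed by its hex value:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	fmt.Println(dbus.PathBusEscape("foo.service")) // foo_2eservice ('.' is 0x2e)
	fmt.Println(dbus.PathBusEscape("2big"))        // _32big (leading digit escaped)
	fmt.Println(dbus.PathBusEscape(""))            // _ (empty-string special case)
}
```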
+
+// Conn is a connection to systemd's dbus endpoint.
+type Conn struct {
+ // sysconn/sysobj are only used to call dbus methods
+ sysconn *dbus.Conn
+ sysobj dbus.BusObject
+
+ // sigconn/sigobj are only used to receive dbus signals
+ sigconn *dbus.Conn
+ sigobj dbus.BusObject
+
+ jobListener struct {
+ jobs map[dbus.ObjectPath]chan<- string
+ sync.Mutex
+ }
+ subscriber struct {
+ updateCh chan<- *SubStateUpdate
+ errCh chan<- error
+ sync.Mutex
+ ignore map[dbus.ObjectPath]int64
+ cleanIgnore int64
+ }
+}
+
+// New establishes a connection to any available bus and authenticates.
+// Callers should call Close() when done with the connection.
+func New() (*Conn, error) {
+ conn, err := NewSystemConnection()
+ if err != nil && os.Geteuid() == 0 {
+ return NewSystemdConnection()
+ }
+ return conn, err
+}
+
+// NewSystemConnection establishes a connection to the system bus and authenticates.
+// Callers should call Close() when done with the connection
+func NewSystemConnection() (*Conn, error) {
+ return NewConnection(func() (*dbus.Conn, error) {
+ return dbusAuthHelloConnection(dbus.SystemBusPrivate)
+ })
+}
+
+// NewUserConnection establishes a connection to the session bus and
+// authenticates. This can be used to connect to systemd user instances.
+// Callers should call Close() when done with the connection.
+func NewUserConnection() (*Conn, error) {
+ return NewConnection(func() (*dbus.Conn, error) {
+ return dbusAuthHelloConnection(dbus.SessionBusPrivate)
+ })
+}
+
+// NewSystemdConnection establishes a private, direct connection to systemd.
+// This can be used for communicating with systemd without a dbus daemon.
+// Callers should call Close() when done with the connection.
+func NewSystemdConnection() (*Conn, error) {
+ return NewConnection(func() (*dbus.Conn, error) {
+ // We skip Hello when talking directly to systemd.
+ return dbusAuthConnection(func() (*dbus.Conn, error) {
+ return dbus.Dial("unix:path=/run/systemd/private")
+ })
+ })
+}
+
+// Close closes an established connection
+func (c *Conn) Close() {
+ c.sysconn.Close()
+ c.sigconn.Close()
+}
+
+// NewConnection establishes a connection to a bus using a caller-supplied function.
+// This allows connecting to remote buses through a user-supplied mechanism.
+// The supplied function may be called multiple times, and should return independent connections.
+// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded,
+// and any authentication should be handled by the function.
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) {
+ sysconn, err := dialBus()
+ if err != nil {
+ return nil, err
+ }
+
+ sigconn, err := dialBus()
+ if err != nil {
+ sysconn.Close()
+ return nil, err
+ }
+
+ c := &Conn{
+ sysconn: sysconn,
+ sysobj: systemdObject(sysconn),
+ sigconn: sigconn,
+ sigobj: systemdObject(sigconn),
+ }
+
+ c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
+ c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
+
+ // Setup the listeners on jobs so that we can get completions
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
+
+ c.dispatch()
+ return c, nil
+}
+
+// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
+// interface. The value is returned in its string representation, as defined at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html
+func (c *Conn) GetManagerProperty(prop string) (string, error) {
+ variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
+ if err != nil {
+ return "", err
+ }
+ return variant.String(), nil
+}
+
+func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := createBus()
+ if err != nil {
+ return nil, err
+ }
+
+ // Only use EXTERNAL method, and hardcode the uid (not username)
+ // to avoid a username lookup (which requires a dynamically linked
+ // libc)
+ methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+ err = conn.Auth(methods)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := dbusAuthConnection(createBus)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func systemdObject(conn *dbus.Conn) dbus.BusObject {
+ return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go
new file mode 100644
index 000000000..ab17f7cc7
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go
@@ -0,0 +1,565 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "path"
+ "strconv"
+
+ "github.com/godbus/dbus"
+)
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+ var id uint32
+ var job dbus.ObjectPath
+ var unit string
+ var result string
+ dbus.Store(signal.Body, &id, &job, &unit, &result)
+ c.jobListener.Lock()
+ out, ok := c.jobListener.jobs[job]
+ if ok {
+ out <- result
+ delete(c.jobListener.jobs, job)
+ }
+ c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+ if ch != nil {
+ c.jobListener.Lock()
+ defer c.jobListener.Unlock()
+ }
+
+ var p dbus.ObjectPath
+ err := c.sysobj.Call(job, 0, args...).Store(&p)
+ if err != nil {
+ return 0, err
+ }
+
+ if ch != nil {
+ c.jobListener.jobs[p] = ch
+ }
+
+ // ignore error since 0 is fine if conversion fails
+ jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+ return jobID, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job has been depending on failed and the job hence
+// has been removed too. skipped indicates that a job was skipped because it
+// didn't apply to the unit's current state.
+//
+// If no error occurs, the ID of the underlying systemd job will be returned. There
+// does exist the possibility for no error to be returned, but for the returned job
+// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
+// should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
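
A sketch of starting a unit and waiting for the job result over the channel, following the contract described above; "foo.service" is illustrative, and the caller needs permission to talk to the bus:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Buffer of 1 so the JobRemoved dispatcher never blocks on us.
	ch := make(chan string, 1)
	jobID, err := conn.StartUnit("foo.service", "replace", ch)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("job %d finished: %s\n", jobID, <-ch)
}
```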
+
+// StopUnit is similar to StartUnit but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service. If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it, and uses a
+// restart otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it, and uses a
+// "Try"-flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit() may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(), properties contains properties
+// of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
+ var err error
+ var props map[string]dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make(map[string]interface{}, len(props))
+ for k, v := range props {
+ out[k] = v.Value()
+ }
+
+ return out, nil
+}
+
+// GetUnitProperties takes the unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: propertyName, Value: prop}, nil
+}
+
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetServiceProperty returns the named property for the given service.
+func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
+ return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope.
+// Returns "dbus.Error: Unknown interface" if unitType is not the unit's actual type.
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties() may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+}
+
+type UnitStatus struct {
+ Name string // The primary unit name as string
+ Description string // The human readable description string
+ LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
+ ActiveState string // The active state (i.e. whether the unit is currently started or not)
+ SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+ Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+ Path dbus.ObjectPath // The unit object path
+ JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
+ JobType string // The job type as string
+ JobPath dbus.ObjectPath // The job object path
+}
+
+type storeFunc func(retvalues ...interface{}) error
+
+func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
+ result := make([][]interface{}, 0)
+ err := f(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ status := make([]UnitStatus, len(result))
+ statusInterface := make([]interface{}, len(status))
+ for i := range status {
+ statusInterface[i] = &status[i]
+ }
+
+ err = dbus.Store(resultInterface, statusInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+}
+
+// ListUnitsFiltered returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+}
+
+// ListUnitsByPatterns returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+}
+
+// ListUnitsByNames returns an array with units. It takes a list of unit
+// names and returns a UnitStatus array. Unlike the ListUnitsByPatterns
+// method, this method returns statuses even for inactive or non-existing
+// units. The input array should contain exact unit names, not patterns.
+func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+}
+
+type UnitFile struct {
+ Path string
+ Type string
+}
+
+func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
+ result := make([][]interface{}, 0)
+ err := f(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ files := make([]UnitFile, len(result))
+ fileInterface := make([]interface{}, len(files))
+ for i := range files {
+ fileInterface[i] = &files[i]
+ }
+
+ err = dbus.Store(resultInterface, fileInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
+
+// ListUnitFiles returns an array of all available units on disk.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// LinkUnitFiles() links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans. The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]LinkUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+// EnableUnitFiles() may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install] section). The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ var carriesInstallInfo bool
+
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]EnableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return false, nil, err
+ }
+
+ return carriesInstallInfo, changes, nil
+}
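+
+// A minimal sketch (not in the original file): enable a unit persistently,
+// inspect the reported symlink changes, then reload the manager so the
+// change takes effect.
+//
+//	install, changes, err := conn.EnableUnitFiles([]string{"foo.service"}, false, true)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if !install {
+//		log.Print("foo.service has no [Install] section")
+//	}
+//	for _, ch := range changes {
+//		fmt.Printf("%s %s -> %s\n", ch.Type, ch.Filename, ch.Destination)
+//	}
+//	if err := conn.Reload(); err != nil {
+//		log.Fatal(err)
+//	}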
+
+type EnableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// DisableUnitFiles() may be used to disable one or more units in the system (by
+// removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]DisableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type DisableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// MaskUnitFiles masks one or more units in the system.
+//
+// It takes three arguments:
+// * list of units to mask (either just file names or full
+// absolute paths if the unit files are residing outside
+// the usual unit search paths)
+// * runtime to specify whether the unit shall be masked for runtime
+// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+// * force flag to control whether existing symlinks shall be
+// replaced if necessary
+func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]MaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type MaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// UnmaskUnitFiles unmasks one or more units in the system.
+//
+// It takes two arguments:
+// * list of unit files to unmask (either just file names or full
+// absolute paths if the unit files are residing outside
+// the usual unit search paths)
+// * runtime to specify whether the unit shall be unmasked for runtime
+// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]UnmaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type UnmaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Reload instructs systemd to scan for and reload unit files. This is
+// equivalent to a 'systemctl daemon-reload'.
+func (c *Conn) Reload() error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
+
+func unitPath(name string) dbus.ObjectPath {
+ return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go
new file mode 100644
index 000000000..6c8189587
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go
@@ -0,0 +1,237 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "github.com/godbus/dbus"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted though, but we plan to cover more properties in future releases.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+ Name string
+ Value dbus.Variant
+}
+
+type PropertyCollection struct {
+ Name string
+ Properties []Property
+}
+
+type execStart struct {
+ Path string // the binary path to execute
+ Args []string // an array with all arguments to pass to the executed command, starting with argument 0
+ UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property. The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+ execStarts := []execStart{
+ {
+ Path: command[0],
+ Args: command,
+ UncleanIsFailure: uncleanIsFailure,
+ },
+ }
+
+ return Property{
+ Name: "ExecStart",
+ Value: dbus.MakeVariant(execStarts),
+ }
+}
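+
+// A minimal sketch of building a transient service (not in the original
+// file; it assumes the Conn.StartTransientUnit method defined elsewhere in
+// this package, and an existing conn):
+//
+//	props := []Property{
+//		PropDescription("transient sleep service"),
+//		PropExecStart([]string{"/bin/sleep", "30"}, false),
+//	}
+//	_, err := conn.StartTransientUnit("sleep.service", "replace", props, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}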
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+ return Property{
+ Name: "RemainAfterExit",
+ Value: dbus.MakeVariant(b),
+ }
+}
+
+// PropType sets the Type service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
+func PropType(t string) Property {
+ return Property{
+ Name: "Type",
+ Value: dbus.MakeVariant(t),
+ }
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+ return Property{
+ Name: "Description",
+ Value: dbus.MakeVariant(desc),
+ }
+}
+
+func propDependency(name string, units []string) Property {
+ return Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+ return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+ return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+ return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+ return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+ return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+ return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+ return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+ return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+ return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+ return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+ return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+ return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+ return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+ return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+ return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+ return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+ return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
+func PropPropagatesReloadTo(units ...string) Property {
+ return propDependency("PropagatesReloadTo", units)
+}
+
+// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
+func PropRequiresMountsFor(units ...string) Property {
+ return propDependency("RequiresMountsFor", units)
+}
+
+// PropSlice sets the Slice unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
+func PropSlice(slice string) Property {
+ return Property{
+ Name: "Slice",
+ Value: dbus.MakeVariant(slice),
+ }
+}
+
+// PropPids sets the PIDs field of scope units used in the initial construction
+// of the scope only and specifies the initial PIDs to add to the scope object.
+// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties
+func PropPids(pids ...uint32) Property {
+ return Property{
+ Name: "PIDs",
+ Value: dbus.MakeVariant(pids),
+ }
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go
new file mode 100644
index 000000000..f92e6fbed
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/set.go
@@ -0,0 +1,47 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+type set struct {
+ data map[string]bool
+}
+
+func (s *set) Add(value string) {
+ s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+ delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+ _, exists = s.data[value]
+ return
+}
+
+func (s *set) Length() int {
+ return len(s.data)
+}
+
+func (s *set) Values() (values []string) {
+ for val := range s.data {
+ values = append(values, val)
+ }
+ return
+}
+
+func newSet() *set {
+ return &set{make(map[string]bool)}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
new file mode 100644
index 000000000..996451445
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
@@ -0,0 +1,250 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "time"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ cleanIgnoreInterval = int64(10 * time.Second)
+ ignoreInterval = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes,
+// systemd will automatically stop sending signals, so there is no need to
+// explicitly call Unsubscribe().
+func (c *Conn) Subscribe() error {
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
+
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Unsubscribe this connection from systemd dbus events.
+func (c *Conn) Unsubscribe() error {
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Conn) dispatch() {
+ ch := make(chan *dbus.Signal, signalBuffer)
+
+ c.sigconn.Signal(ch)
+
+ go func() {
+ for {
+ signal, ok := <-ch
+ if !ok {
+ return
+ }
+
+ if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
+ c.jobComplete(signal)
+ }
+
+ if c.subscriber.updateCh == nil {
+ continue
+ }
+
+ var unitPath dbus.ObjectPath
+ switch signal.Name {
+ case "org.freedesktop.systemd1.Manager.JobRemoved":
+ unitName := signal.Body[2].(string)
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
+ case "org.freedesktop.systemd1.Manager.UnitNew":
+ unitPath = signal.Body[1].(dbus.ObjectPath)
+ case "org.freedesktop.DBus.Properties.PropertiesChanged":
+ if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
+ unitPath = signal.Path
+ }
+ }
+
+ if unitPath == dbus.ObjectPath("") {
+ continue
+ }
+
+ c.sendSubStateUpdate(unitPath)
+ }
+ }()
+}
+
+// SubscribeUnits returns two unbuffered channels which will receive all
+// changed units every interval. Deleted units are sent as nil.
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
+ return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
+}
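+
+// A minimal usage sketch (not in the original file; per the Subscribe doc
+// above, Subscribe must be called on the connection first):
+//
+//	if err := conn.Subscribe(); err != nil {
+//		log.Fatal(err)
+//	}
+//	statusCh, errCh := conn.SubscribeUnits(5 * time.Second)
+//	for {
+//		select {
+//		case changed := <-statusCh:
+//			for name, status := range changed {
+//				if status == nil {
+//					fmt.Printf("%s was removed\n", name)
+//				} else {
+//					fmt.Printf("%s is now %s\n", name, status.ActiveState)
+//				}
+//			}
+//		case err := <-errCh:
+//			log.Print(err)
+//		}
+//	}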
+
+// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
+// size of the channels, the comparison function for detecting changes and a filter
+// function for cutting down on the noise that your channel receives.
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+ old := make(map[string]*UnitStatus)
+ statusChan := make(chan map[string]*UnitStatus, buffer)
+ errChan := make(chan error, buffer)
+
+ go func() {
+ for {
+ timerChan := time.After(interval)
+
+ units, err := c.ListUnits()
+ if err == nil {
+ cur := make(map[string]*UnitStatus)
+ for i := range units {
+ if filterUnit != nil && filterUnit(units[i].Name) {
+ continue
+ }
+ cur[units[i].Name] = &units[i]
+ }
+
+ // add all new or changed units
+ changed := make(map[string]*UnitStatus)
+ for n, u := range cur {
+ if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+ changed[n] = u
+ }
+ delete(old, n)
+ }
+
+ // add all deleted units
+ for oldN := range old {
+ changed[oldN] = nil
+ }
+
+ old = cur
+
+ if len(changed) != 0 {
+ statusChan <- changed
+ }
+ } else {
+ errChan <- err
+ }
+
+ <-timerChan
+ }
+ }()
+
+ return statusChan, errChan
+}
+
+type SubStateUpdate struct {
+ UnitName string
+ SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+ c.subscriber.updateCh = updateCh
+ c.subscriber.errCh = errCh
+}
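+
+// A minimal sketch (not in the original file): buffering the channels makes
+// the non-blocking writes described above less likely to drop updates.
+//
+//	updates := make(chan *SubStateUpdate, 256)
+//	errs := make(chan error, 16)
+//	conn.SetSubStateSubscriber(updates, errs)
+//	go func() {
+//		for u := range updates {
+//			fmt.Printf("%s -> %s\n", u.UnitName, u.SubState)
+//		}
+//	}()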
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+
+ if c.shouldIgnore(path) {
+ return
+ }
+
+ info, err := c.GetUnitProperties(string(path))
+ if err != nil {
+ select {
+ case c.subscriber.errCh <- err:
+ default:
+ }
+ // info is nil on error; return here so the type assertions below
+ // cannot panic.
+ return
+ }
+
+ name := info["Id"].(string)
+ substate := info["SubState"].(string)
+
+ update := &SubStateUpdate{name, substate}
+ select {
+ case c.subscriber.updateCh <- update:
+ default:
+ select {
+ case c.subscriber.errCh <- errors.New("update channel is full"):
+ default:
+ }
+ }
+
+ c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+ t, ok := c.subscriber.ignore[path]
+ return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+ c.cleanIgnore()
+
+ // unit is unloaded - it will trigger bad systemd dbus behavior
+ if info["LoadState"].(string) == "not-found" {
+ c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+ }
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+ now := time.Now().UnixNano()
+ if c.subscriber.cleanIgnore < now {
+ c.subscriber.cleanIgnore = now + cleanIgnoreInterval
+
+ for p, t := range c.subscriber.ignore {
+ if t < now {
+ delete(c.subscriber.ignore, p)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 000000000..5b408d584
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "time"
+)
+
+// SubscriptionSet is a subscription set which is like conn.Subscribe but
+// can filter to only return events for a set of units.
+type SubscriptionSet struct {
+ *set
+ conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+ return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+ // TODO: Make fully evented by using systemd 209 with properties changed values
+ return s.conn.SubscribeUnitsCustom(time.Second, 0,
+ mismatchUnitStatus,
+ func(unit string) bool { return s.filter(unit) },
+ )
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+ return &SubscriptionSet{newSet(), conn}
+}
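+
+// A minimal usage sketch (not in the original file):
+//
+//	set := conn.NewSubscriptionSet()
+//	set.Add("docker.service")
+//	statusCh, errCh := set.Subscribe()
+//
+// Only events for docker.service are delivered on the returned channels;
+// they are consumed exactly like the SubscribeUnits channels.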
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects
+// are not equivalent, and false otherwise.
+// Only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+ return u1.Name != u2.Name ||
+ u1.Description != u2.Description ||
+ u1.LoadState != u2.LoadState ||
+ u1.ActiveState != u2.ActiveState ||
+ u1.SubState != u2.SubState
+}
diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go
new file mode 100644
index 000000000..7828ce6f0
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util.go
@@ -0,0 +1,90 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package util contains utility functions related to systemd that applications
+// can use to check things like whether systemd is running. Note that some of
+// these functions attempt to manually load systemd libraries at runtime rather
+// than linking against them.
+package util
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+var (
+ ErrNoCGO = fmt.Errorf("go-systemd built with CGO disabled")
+)
+
+// GetRunningSlice attempts to retrieve the name of the systemd slice in which
+// the current process is running.
+// This function is a wrapper around the libsystemd C library; if it cannot be
+// opened, an error is returned.
+func GetRunningSlice() (string, error) {
+ return getRunningSlice()
+}
+
+// RunningFromSystemService tries to detect whether the current process has
+// been invoked from a system service. The condition for this is whether the
+// process is _not_ a user process. User processes are those running in session
+// scopes or under per-user `systemd --user` instances.
+//
+// To avoid false positives on systems without `pam_systemd` (which is
+// responsible for creating user sessions), this function also uses a heuristic
+// to detect whether it's being invoked from a session leader process. This is
+// the case if the current process is executed directly from a service file
+// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the
+// command is instead launched in a subshell or similar so that it is not
+// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`).
+//
+// This function is a wrapper around the libsystemd C library; if this is
+// unable to successfully open a handle to the library for any reason (e.g. it
+// cannot be found), an error will be returned.
+func RunningFromSystemService() (bool, error) {
+ return runningFromSystemService()
+}
+
+// CurrentUnitName attempts to retrieve the name of the systemd system unit
+// from which the calling process has been invoked. It wraps the systemd
+// `sd_pid_get_unit` call, with the same caveat: for processes not part of a
+// systemd system unit, this function will return an error.
+func CurrentUnitName() (string, error) {
+ return currentUnitName()
+}
+
+// IsRunningSystemd checks whether the host was booted with systemd as its init
+// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
+// checks whether /run/systemd/system/ exists and is a directory.
+// http://www.freedesktop.org/software/systemd/man/sd_booted.html
+func IsRunningSystemd() bool {
+ fi, err := os.Lstat("/run/systemd/system")
+ if err != nil {
+ return false
+ }
+ return fi.IsDir()
+}
+
+// GetMachineID returns a host's 128-bit machine ID as a string. This functions
+// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads
+// the contents of /etc/machine-id.
+// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html
+func GetMachineID() (string, error) {
+ machineID, err := ioutil.ReadFile("/etc/machine-id")
+ if err != nil {
+ return "", fmt.Errorf("failed to read /etc/machine-id: %v", err)
+ }
+ return strings.TrimSpace(string(machineID)), nil
+}
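+
+// A minimal usage sketch from a consumer's point of view (not in the
+// original file):
+//
+//	if !util.IsRunningSystemd() {
+//		log.Fatal("host was not booted with systemd as init system")
+//	}
+//	id, err := util.GetMachineID()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("machine-id:", id)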
diff --git a/vendor/github.com/coreos/go-systemd/util/util_cgo.go b/vendor/github.com/coreos/go-systemd/util/util_cgo.go
new file mode 100644
index 000000000..22c0d6099
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util_cgo.go
@@ -0,0 +1,174 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package util
+
+// #include <stdlib.h>
+// #include <sys/types.h>
+// #include <unistd.h>
+//
+// int
+// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid)
+// {
+// int (*sd_pid_get_owner_uid)(pid_t, uid_t *);
+//
+// sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f;
+// return sd_pid_get_owner_uid(pid, uid);
+// }
+//
+// int
+// my_sd_pid_get_unit(void *f, pid_t pid, char **unit)
+// {
+// int (*sd_pid_get_unit)(pid_t, char **);
+//
+// sd_pid_get_unit = (int (*)(pid_t, char **))f;
+// return sd_pid_get_unit(pid, unit);
+// }
+//
+// int
+// my_sd_pid_get_slice(void *f, pid_t pid, char **slice)
+// {
+// int (*sd_pid_get_slice)(pid_t, char **);
+//
+// sd_pid_get_slice = (int (*)(pid_t, char **))f;
+// return sd_pid_get_slice(pid, slice);
+// }
+//
+// int
+// am_session_leader()
+// {
+// return (getsid(0) == getpid());
+// }
+import "C"
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "github.com/coreos/pkg/dlopen"
+)
+
+var libsystemdNames = []string{
+ // systemd < 209
+ "libsystemd-login.so.0",
+ "libsystemd-login.so",
+
+ // systemd >= 209 merged libsystemd-login into libsystemd proper
+ "libsystemd.so.0",
+ "libsystemd.so",
+}
+
+func getRunningSlice() (slice string, err error) {
+ var h *dlopen.LibHandle
+ h, err = dlopen.GetHandle(libsystemdNames)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err1 := h.Close(); err1 != nil {
+ err = err1
+ }
+ }()
+
+ sd_pid_get_slice, err := h.GetSymbolPointer("sd_pid_get_slice")
+ if err != nil {
+ return
+ }
+
+ var s string
+ sl := C.CString(s)
+ defer C.free(unsafe.Pointer(sl))
+
+ ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl)
+ if ret < 0 {
+ err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret))
+ return
+ }
+
+ return C.GoString(sl), nil
+}
+
+func runningFromSystemService() (ret bool, err error) {
+ var h *dlopen.LibHandle
+ h, err = dlopen.GetHandle(libsystemdNames)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err1 := h.Close(); err1 != nil {
+ err = err1
+ }
+ }()
+
+ sd_pid_get_owner_uid, err := h.GetSymbolPointer("sd_pid_get_owner_uid")
+ if err != nil {
+ return
+ }
+
+ var uid C.uid_t
+ errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid)
+ serrno := syscall.Errno(-errno)
+ // when we're running from a unit file, sd_pid_get_owner_uid returns
+ // ENOENT (systemd <220) or ENXIO (systemd >=220)
+ switch {
+ case errno >= 0:
+ ret = false
+ case serrno == syscall.ENOENT, serrno == syscall.ENXIO:
+ // Since the implementation of sessions in systemd relies on
+ // the `pam_systemd` module, using the sd_pid_get_owner_uid
+ // heuristic alone can result in false positives if that module
+ // (or PAM itself) is not present or properly configured on the
+ // system. As such, we also check if we're the session leader,
+ // which should be the case if we're invoked from a unit file,
+ // but not if e.g. we're invoked from the command line from a
+ // user's login session
+ ret = C.am_session_leader() == 1
+ default:
+ err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno))
+ }
+ return
+}
+
+func currentUnitName() (unit string, err error) {
+ var h *dlopen.LibHandle
+ h, err = dlopen.GetHandle(libsystemdNames)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err1 := h.Close(); err1 != nil {
+ err = err1
+ }
+ }()
+
+ sd_pid_get_unit, err := h.GetSymbolPointer("sd_pid_get_unit")
+ if err != nil {
+ return
+ }
+
+ var s string
+ u := C.CString(s)
+ defer C.free(unsafe.Pointer(u))
+
+ ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u)
+ if ret < 0 {
+ err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret))
+ return
+ }
+
+ unit = C.GoString(u)
+ return
+}
diff --git a/vendor/github.com/coreos/go-systemd/util/util_stub.go b/vendor/github.com/coreos/go-systemd/util/util_stub.go
new file mode 100644
index 000000000..477589e12
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !cgo
+
+package util
+
+func getRunningSlice() (string, error) { return "", ErrNoCGO }
+
+func runningFromSystemService() (bool, error) { return false, ErrNoCGO }
+
+func currentUnitName() (string, error) { return "", ErrNoCGO }
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
new file mode 100644
index 000000000..e06d20818
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
new file mode 100644
index 000000000..b39ddfa5c
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/README.md b/vendor/github.com/coreos/pkg/README.md
new file mode 100644
index 000000000..ca68a07f0
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/README.md
@@ -0,0 +1,4 @@
+a collection of go utility packages
+
+[![Build Status](https://travis-ci.org/coreos/pkg.png?branch=master)](https://travis-ci.org/coreos/pkg)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/coreos/pkg)
diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen.go b/vendor/github.com/coreos/pkg/dlopen/dlopen.go
new file mode 100644
index 000000000..23774f612
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/dlopen/dlopen.go
@@ -0,0 +1,82 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dlopen provides some convenience functions to dlopen a library and
+// get its symbols.
+package dlopen
+
+// #cgo LDFLAGS: -ldl
+// #include <stdlib.h>
+// #include <dlfcn.h>
+import "C"
+import (
+ "errors"
+ "fmt"
+ "unsafe"
+)
+
+var ErrSoNotFound = errors.New("unable to open a handle to the library")
+
+// LibHandle represents an open handle to a library (.so)
+type LibHandle struct {
+ Handle unsafe.Pointer
+ Libname string
+}
+
+// GetHandle tries to get a handle to a library (.so), attempting to access it
+// by the names specified in libs and returning the first that is successfully
+// opened. Callers are responsible for closing the handle. If no library can
+// be successfully opened, an error is returned.
+func GetHandle(libs []string) (*LibHandle, error) {
+ for _, name := range libs {
+ libname := C.CString(name)
+ defer C.free(unsafe.Pointer(libname))
+ handle := C.dlopen(libname, C.RTLD_LAZY)
+ if handle != nil {
+ h := &LibHandle{
+ Handle: handle,
+ Libname: name,
+ }
+ return h, nil
+ }
+ }
+ return nil, ErrSoNotFound
+}
+
+// GetSymbolPointer takes a symbol name and returns a pointer to the symbol.
+func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) {
+ sym := C.CString(symbol)
+ defer C.free(unsafe.Pointer(sym))
+
+ C.dlerror()
+ p := C.dlsym(l.Handle, sym)
+ e := C.dlerror()
+ if e != nil {
+ return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e)))
+ }
+
+ return p, nil
+}
+
+// Close closes a LibHandle.
+func (l *LibHandle) Close() error {
+ C.dlerror()
+ C.dlclose(l.Handle)
+ e := C.dlerror()
+ if e != nil {
+ return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e)))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
new file mode 100644
index 000000000..48a660104
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
@@ -0,0 +1,56 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build linux
+
+package dlopen
+
+// #include <string.h>
+// #include <stdlib.h>
+//
+// int
+// my_strlen(void *f, const char *s)
+// {
+// size_t (*strlen)(const char *);
+//
+// strlen = (size_t (*)(const char *))f;
+// return strlen(s);
+// }
+import "C"
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+func strlen(libs []string, s string) (int, error) {
+ h, err := GetHandle(libs)
+ if err != nil {
+ return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err)
+ }
+ defer h.Close()
+
+ f := "strlen"
+ cs := C.CString(s)
+ defer C.free(unsafe.Pointer(cs))
+
+ strlen, err := h.GetSymbolPointer(f)
+ if err != nil {
+ return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err)
+ }
+
+ length := C.my_strlen(strlen, cs)
+
+ return int(length), nil
+}
diff --git a/vendor/github.com/cri-o/ocicni/LICENSE b/vendor/github.com/cri-o/ocicni/LICENSE
new file mode 100644
index 000000000..3fd703072
--- /dev/null
+++ b/vendor/github.com/cri-o/ocicni/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2016 Red Hat, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cri-o/ocicni/README.md b/vendor/github.com/cri-o/ocicni/README.md
new file mode 100644
index 000000000..99c103c83
--- /dev/null
+++ b/vendor/github.com/cri-o/ocicni/README.md
@@ -0,0 +1,3 @@
+# ocicni
+
+API layer to call the CNI plugins from an OCI lifecycle daemon
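+
+A minimal usage sketch (illustrative only; the directories are the package
+defaults and the pod fields are placeholders):
+
+```Go
+plugin, err := ocicni.InitCNI("/etc/cni/net.d", "/opt/cni/bin")
+if err != nil {
+	// handle initialization failure
+}
+err = plugin.SetUpPod(ocicni.PodNetwork{
+	Name:      "mypod",
+	Namespace: "default",
+	ID:        "sandbox-container-id",
+	NetNS:     "/var/run/netns/mypod",
+})
+```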
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/noop.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/noop.go
new file mode 100644
index 000000000..9f315a7c6
--- /dev/null
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/noop.go
@@ -0,0 +1,24 @@
+package ocicni
+
+type cniNoOp struct {
+}
+
+func (noop *cniNoOp) Name() string {
+ return "CNINoOp"
+}
+
+func (noop *cniNoOp) SetUpPod(network PodNetwork) error {
+ return nil
+}
+
+func (noop *cniNoOp) TearDownPod(network PodNetwork) error {
+ return nil
+}
+
+func (noop *cniNoOp) GetPodNetworkStatus(network PodNetwork) (string, error) {
+ return "", nil
+}
+
+func (noop *cniNoOp) Status() error {
+ return nil
+}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
new file mode 100644
index 000000000..03918bfa4
--- /dev/null
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -0,0 +1,421 @@
+package ocicni
+
+import (
+ "errors"
+ "fmt"
+ "os/exec"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/containernetworking/cni/libcni"
+ cnitypes "github.com/containernetworking/cni/pkg/types"
+ "github.com/fsnotify/fsnotify"
+ "github.com/sirupsen/logrus"
+)
+
+type cniNetworkPlugin struct {
+ loNetwork *cniNetwork
+
+ sync.RWMutex
+ defaultNetwork *cniNetwork
+
+ nsenterPath string
+ pluginDir string
+ cniDirs []string
+ vendorCNIDirPrefix string
+
+ monitorNetDirChan chan struct{}
+
+ // The pod map provides synchronization for a given pod's network
+ // operations. Each pod's setup/teardown/status operations
+ // are synchronized against each other, but network operations of other
+ // pods can proceed in parallel.
+ podsLock sync.Mutex
+ pods map[string]*podLock
+}
+
+type cniNetwork struct {
+ name string
+ NetworkConfig *libcni.NetworkConfigList
+ CNIConfig libcni.CNI
+}
+
+var errMissingDefaultNetwork = errors.New("Missing CNI default network")
+
+type podLock struct {
+ // Count of in-flight operations for this pod; when this reaches zero
+ // the lock can be removed from the pod map
+ refcount uint
+
+ // Lock to synchronize operations for this specific pod
+ mu sync.Mutex
+}
+
+func buildFullPodName(podNetwork PodNetwork) string {
+ return podNetwork.Namespace + "_" + podNetwork.Name
+}
+
+// Lock network operations for a specific pod. If that pod is not yet in
+// the pod map, it will be added. The reference count for the pod will
+// be increased.
+func (plugin *cniNetworkPlugin) podLock(podNetwork PodNetwork) *sync.Mutex {
+ plugin.podsLock.Lock()
+ defer plugin.podsLock.Unlock()
+
+ fullPodName := buildFullPodName(podNetwork)
+ lock, ok := plugin.pods[fullPodName]
+ if !ok {
+ lock = &podLock{}
+ plugin.pods[fullPodName] = lock
+ }
+ lock.refcount++
+ return &lock.mu
+}
+
+// Unlock network operations for a specific pod. The reference count for the
+// pod will be decreased. If the reference count reaches zero, the pod will be
+// removed from the pod map.
+func (plugin *cniNetworkPlugin) podUnlock(podNetwork PodNetwork) {
+ plugin.podsLock.Lock()
+ defer plugin.podsLock.Unlock()
+
+ fullPodName := buildFullPodName(podNetwork)
+ lock, ok := plugin.pods[fullPodName]
+ if !ok {
+ logrus.Warningf("Unbalanced pod lock unref for %s", fullPodName)
+ return
+ } else if lock.refcount == 0 {
+ // This should never ever happen, but handle it anyway
+ delete(plugin.pods, fullPodName)
+ logrus.Errorf("Pod lock for %s still in map with zero refcount", fullPodName)
+ return
+ }
+ lock.refcount--
+ lock.mu.Unlock()
+ if lock.refcount == 0 {
+ delete(plugin.pods, fullPodName)
+ }
+}
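+
+// Note: callers are expected to pair these calls, e.g.
+//
+//	plugin.podLock(podNetwork).Lock()
+//	defer plugin.podUnlock(podNetwork)
+//
+// so the reference taken in podLock is always released.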
+
+func (plugin *cniNetworkPlugin) monitorNetDir() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+		logrus.Errorf("could not create new watcher: %v", err)
+ return
+ }
+ defer watcher.Close()
+
+ go func() {
+ for {
+ select {
+ case event := <-watcher.Events:
+ logrus.Debugf("CNI monitoring event %v", event)
+ if event.Op&fsnotify.Create != fsnotify.Create &&
+ event.Op&fsnotify.Write != fsnotify.Write {
+ continue
+ }
+
+ if err = plugin.syncNetworkConfig(); err == nil {
+ logrus.Infof("CNI asynchronous setting succeeded")
+ continue
+ }
+
+ logrus.Errorf("CNI setting failed, continue monitoring: %v", err)
+
+ case err := <-watcher.Errors:
+ logrus.Errorf("CNI monitoring error %v", err)
+ close(plugin.monitorNetDirChan)
+ return
+ }
+ }
+ }()
+
+ if err = watcher.Add(plugin.pluginDir); err != nil {
+ logrus.Error(err)
+ return
+ }
+
+ <-plugin.monitorNetDirChan
+}
+
+// InitCNI takes the plugin directory and the CNI directories in which to
+// search for CNI plugin binaries.
+// Returns a valid plugin object and any error.
+func InitCNI(pluginDir string, cniDirs ...string) (CNIPlugin, error) {
+ plugin := probeNetworkPluginsWithVendorCNIDirPrefix(pluginDir, cniDirs, "")
+ var err error
+ plugin.nsenterPath, err = exec.LookPath("nsenter")
+ if err != nil {
+ return nil, err
+ }
+
+	// Check whether a default network exists. If the lookup fails for any
+	// reason other than a missing default network, give up and fall back to
+	// a no-op plugin; if the network is merely missing, keep monitoring for it.
+ _, err = getDefaultCNINetwork(plugin.pluginDir, plugin.cniDirs, plugin.vendorCNIDirPrefix)
+ if err != nil {
+ if err != errMissingDefaultNetwork {
+ logrus.Warningf("Error in finding usable CNI plugin - %v", err)
+ // create a noop plugin instead
+ return &cniNoOp{}, nil
+ }
+
+ // We do not have a default network, we start the monitoring thread.
+ go plugin.monitorNetDir()
+ }
+
+ return plugin, nil
+}
+
+func probeNetworkPluginsWithVendorCNIDirPrefix(pluginDir string, cniDirs []string, vendorCNIDirPrefix string) *cniNetworkPlugin {
+ plugin := &cniNetworkPlugin{
+ defaultNetwork: nil,
+ loNetwork: getLoNetwork(cniDirs, vendorCNIDirPrefix),
+ pluginDir: pluginDir,
+ cniDirs: cniDirs,
+ vendorCNIDirPrefix: vendorCNIDirPrefix,
+ monitorNetDirChan: make(chan struct{}),
+ pods: make(map[string]*podLock),
+ }
+
+	// Sync NetworkConfig on a best-effort basis during probing.
+ if err := plugin.syncNetworkConfig(); err != nil {
+ logrus.Error(err)
+ }
+ return plugin
+}
+
+func getDefaultCNINetwork(pluginDir string, cniDirs []string, vendorCNIDirPrefix string) (*cniNetwork, error) {
+ if pluginDir == "" {
+ pluginDir = DefaultNetDir
+ }
+ if len(cniDirs) == 0 {
+ cniDirs = []string{DefaultCNIDir}
+ }
+
+ files, err := libcni.ConfFiles(pluginDir, []string{".conf", ".conflist", ".json"})
+ switch {
+ case err != nil:
+ return nil, err
+ case len(files) == 0:
+ return nil, errMissingDefaultNetwork
+ }
+
+ sort.Strings(files)
+ for _, confFile := range files {
+ var confList *libcni.NetworkConfigList
+ if strings.HasSuffix(confFile, ".conflist") {
+ confList, err = libcni.ConfListFromFile(confFile)
+ if err != nil {
+ logrus.Warningf("Error loading CNI config list file %s: %v", confFile, err)
+ continue
+ }
+ } else {
+ conf, err := libcni.ConfFromFile(confFile)
+ if err != nil {
+ logrus.Warningf("Error loading CNI config file %s: %v", confFile, err)
+ continue
+ }
+ if conf.Network.Type == "" {
+ logrus.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile)
+ continue
+ }
+ confList, err = libcni.ConfListFromConf(conf)
+ if err != nil {
+ logrus.Warningf("Error converting CNI config file %s to list: %v", confFile, err)
+ continue
+ }
+ }
+ if len(confList.Plugins) == 0 {
+ logrus.Warningf("CNI config list %s has no networks, skipping", confFile)
+ continue
+ }
+ logrus.Infof("CNI network %s (type=%v) is used from %s", confList.Name, confList.Plugins[0].Network.Type, confFile)
+ // Search for vendor-specific plugins as well as default plugins in the CNI codebase.
+ vendorDir := vendorCNIDir(vendorCNIDirPrefix, confList.Plugins[0].Network.Type)
+ cninet := &libcni.CNIConfig{
+ Path: append(cniDirs, vendorDir),
+ }
+ network := &cniNetwork{name: confList.Name, NetworkConfig: confList, CNIConfig: cninet}
+ return network, nil
+ }
+ return nil, fmt.Errorf("No valid networks found in %s", pluginDir)
+}
+
+func vendorCNIDir(prefix, pluginType string) string {
+ return fmt.Sprintf(VendorCNIDirTemplate, prefix, pluginType)
+}
+
+func getLoNetwork(cniDirs []string, vendorDirPrefix string) *cniNetwork {
+ if len(cniDirs) == 0 {
+ cniDirs = []string{DefaultCNIDir}
+ }
+
+ loConfig, err := libcni.ConfListFromBytes([]byte(`{
+ "cniVersion": "0.2.0",
+ "name": "cni-loopback",
+ "plugins": [{
+ "type": "loopback"
+ }]
+}`))
+ if err != nil {
+ // The hardcoded config above should always be valid and unit tests will
+ // catch this
+ panic(err)
+ }
+ vendorDir := vendorCNIDir(vendorDirPrefix, loConfig.Plugins[0].Network.Type)
+ cninet := &libcni.CNIConfig{
+ Path: append(cniDirs, vendorDir),
+ }
+ loNetwork := &cniNetwork{
+ name: "lo",
+ NetworkConfig: loConfig,
+ CNIConfig: cninet,
+ }
+
+ return loNetwork
+}
+
+func (plugin *cniNetworkPlugin) syncNetworkConfig() error {
+ network, err := getDefaultCNINetwork(plugin.pluginDir, plugin.cniDirs, plugin.vendorCNIDirPrefix)
+ if err != nil {
+ logrus.Errorf("error updating cni config: %s", err)
+ return err
+ }
+ plugin.setDefaultNetwork(network)
+
+ return nil
+}
+
+func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork {
+ plugin.RLock()
+ defer plugin.RUnlock()
+ return plugin.defaultNetwork
+}
+
+func (plugin *cniNetworkPlugin) setDefaultNetwork(n *cniNetwork) {
+ plugin.Lock()
+ defer plugin.Unlock()
+ plugin.defaultNetwork = n
+}
+
+func (plugin *cniNetworkPlugin) checkInitialized() error {
+ if plugin.getDefaultNetwork() == nil {
+ return errors.New("cni config uninitialized")
+ }
+ return nil
+}
+
+func (plugin *cniNetworkPlugin) Name() string {
+ return CNIPluginName
+}
+
+func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) error {
+ if err := plugin.checkInitialized(); err != nil {
+ return err
+ }
+
+ plugin.podLock(podNetwork).Lock()
+ defer plugin.podUnlock(podNetwork)
+
+ _, err := plugin.loNetwork.addToNetwork(podNetwork)
+ if err != nil {
+ logrus.Errorf("Error while adding to cni lo network: %s", err)
+ return err
+ }
+
+ _, err = plugin.getDefaultNetwork().addToNetwork(podNetwork)
+ if err != nil {
+ logrus.Errorf("Error while adding to cni network: %s", err)
+ return err
+ }
+
+ return err
+}
+
+func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
+ if err := plugin.checkInitialized(); err != nil {
+ return err
+ }
+
+ plugin.podLock(podNetwork).Lock()
+ defer plugin.podUnlock(podNetwork)
+
+ return plugin.getDefaultNetwork().deleteFromNetwork(podNetwork)
+}
+
+// TODO: Use the addToNetwork function to obtain the IP of the Pod. That assumes an idempotent ADD call to the plugin.
+// Also fix the runtime's call to the Status function so it is made only when the IP is lost; there is no need for periodic calls.
+func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) (string, error) {
+ plugin.podLock(podNetwork).Lock()
+ defer plugin.podUnlock(podNetwork)
+
+ ip, err := getContainerIP(plugin.nsenterPath, podNetwork.NetNS, DefaultInterfaceName, "-4")
+ if err != nil {
+ return "", err
+ }
+
+ return ip.String(), nil
+}
+
+func (network *cniNetwork) addToNetwork(podNetwork PodNetwork) (cnitypes.Result, error) {
+ rt, err := buildCNIRuntimeConf(podNetwork)
+ if err != nil {
+ logrus.Errorf("Error adding network: %v", err)
+ return nil, err
+ }
+
+ netconf, cninet := network.NetworkConfig, network.CNIConfig
+ logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
+ res, err := cninet.AddNetworkList(netconf, rt)
+ if err != nil {
+ logrus.Errorf("Error adding network: %v", err)
+ return nil, err
+ }
+
+ return res, nil
+}
+
+func (network *cniNetwork) deleteFromNetwork(podNetwork PodNetwork) error {
+ rt, err := buildCNIRuntimeConf(podNetwork)
+ if err != nil {
+ logrus.Errorf("Error deleting network: %v", err)
+ return err
+ }
+
+ netconf, cninet := network.NetworkConfig, network.CNIConfig
+ logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
+ err = cninet.DelNetworkList(netconf, rt)
+ if err != nil {
+ logrus.Errorf("Error deleting network: %v", err)
+ return err
+ }
+ return nil
+}
+
+func buildCNIRuntimeConf(podNetwork PodNetwork) (*libcni.RuntimeConf, error) {
+ logrus.Infof("Got pod network %+v", podNetwork)
+
+ rt := &libcni.RuntimeConf{
+ ContainerID: podNetwork.ID,
+ NetNS: podNetwork.NetNS,
+ IfName: DefaultInterfaceName,
+ Args: [][2]string{
+ {"IgnoreUnknown", "1"},
+ {"K8S_POD_NAMESPACE", podNetwork.Namespace},
+ {"K8S_POD_NAME", podNetwork.Name},
+ {"K8S_POD_INFRA_CONTAINER_ID", podNetwork.ID},
+ },
+ }
+
+ if len(podNetwork.PortMappings) == 0 {
+ return rt, nil
+ }
+
+ rt.CapabilityArgs = map[string]interface{}{
+ "portMappings": podNetwork.PortMappings,
+ }
+ return rt, nil
+}
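+
+// Illustrative note (an assumption about libcni behavior, not upstream
+// documentation): libcni passes the Args above to the plugin via the
+// CNI_ARGS environment variable as semicolon-separated key=value pairs,
+// e.g. for a pod "mypod" in namespace "default":
+//
+//	IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=mypod;...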
+
+func (plugin *cniNetworkPlugin) Status() error {
+ return plugin.checkInitialized()
+}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
new file mode 100644
index 000000000..a272e92e7
--- /dev/null
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
@@ -0,0 +1,62 @@
+package ocicni
+
+const (
+ // DefaultInterfaceName is the string to be used for the interface name inside the net namespace
+ DefaultInterfaceName = "eth0"
+ // CNIPluginName is the default name of the plugin
+ CNIPluginName = "cni"
+	// DefaultNetDir is the place to look for CNI network configuration files
+	DefaultNetDir = "/etc/cni/net.d"
+	// DefaultCNIDir is the place to look for CNI plugin binaries
+	DefaultCNIDir = "/opt/cni/bin"
+ // VendorCNIDirTemplate is the template for looking up vendor specific cni config/executable files
+ VendorCNIDirTemplate = "%s/opt/%s/bin"
+)
+
+// PortMapping maps to the standard CNI portmapping Capability
+// see: https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md
+type PortMapping struct {
+ // HostPort is the port number on the host.
+ HostPort int32 `json:"hostPort"`
+ // ContainerPort is the port number inside the sandbox.
+ ContainerPort int32 `json:"containerPort"`
+ // Protocol is the protocol of the port mapping.
+ Protocol string `json:"protocol"`
+ // HostIP is the host ip to use.
+ HostIP string `json:"hostIP"`
+}
+
+// PodNetwork configures the network of a pod sandbox.
+type PodNetwork struct {
+ // Name is the name of the sandbox.
+ Name string
+ // Namespace is the namespace of the sandbox.
+ Namespace string
+ // ID is the id of the sandbox container.
+ ID string
+ // NetNS is the network namespace path of the sandbox.
+ NetNS string
+ // PortMappings is the port mapping of the sandbox.
+ PortMappings []PortMapping
+}
+
+// CNIPlugin is the interface that needs to be implemented by a plugin
+type CNIPlugin interface {
+	// Name returns the plugin's name. This will be used when searching
+	// for a plugin by name.
+	Name() string
+
+ // SetUpPod is the method called after the sandbox container of
+ // the pod has been created but before the other containers of the
+ // pod are launched.
+ SetUpPod(network PodNetwork) error
+
+	// TearDownPod is the method called before a pod's sandbox container is deleted
+ TearDownPod(network PodNetwork) error
+
+	// GetPodNetworkStatus is the method called to obtain the IPv4 or IPv6 addresses of the pod sandbox
+ GetPodNetworkStatus(network PodNetwork) (string, error)
+
+	// Status returns an error if the network plugin is in an error state
+ Status() error
+}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/util.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/util.go
new file mode 100644
index 000000000..547e95972
--- /dev/null
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/util.go
@@ -0,0 +1,32 @@
+package ocicni
+
+import (
+ "fmt"
+ "net"
+ "os/exec"
+ "strings"
+)
+
+func getContainerIP(nsenterPath, netnsPath, interfaceName, addrType string) (net.IP, error) {
+ // Try to retrieve ip inside container network namespace
+ output, err := exec.Command(nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--",
+ "ip", "-o", addrType, "addr", "show", "dev", interfaceName, "scope", "global").CombinedOutput()
+ if err != nil {
+ return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err)
+ }
+
+ lines := strings.Split(string(output), "\n")
+ if len(lines) < 1 {
+ return nil, fmt.Errorf("Unexpected command output %s", output)
+ }
+ fields := strings.Fields(lines[0])
+ if len(fields) < 4 {
+		return nil, fmt.Errorf("Unexpected address output %s", lines[0])
+ }
+ ip, _, err := net.ParseCIDR(fields[3])
+ if err != nil {
+ return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err)
+ }
+
+ return ip, nil
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 000000000..c83641619
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md
new file mode 100644
index 000000000..262430449
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/README.md
@@ -0,0 +1,205 @@
+go-spew
+=======
+
+[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew) [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
+
+
+Go-spew implements a deep pretty printer for Go data structures to aid in
+debugging. A comprehensive suite of tests with 100% test coverage is provided
+to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
+report. Go-spew is licensed under the liberal ISC license, so it may be used in
+open source or commercial projects.
+
+If you're interested in reading about how this package came to life and some
+of the challenges involved in providing a deep pretty printer, there is a blog
+post about it
+[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
+
+## Documentation
+
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the excellent GoDoc site here:
+http://godoc.org/github.com/davecgh/go-spew/spew
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
+
+## Installation
+
+```bash
+$ go get -u github.com/davecgh/go-spew/spew
+```
+
+## Quick Start
+
+Add this import line to the file you're working in:
+
+```Go
+import "github.com/davecgh/go-spew/spew"
+```
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+
+```Go
+spew.Dump(myVar1, myVar2, ...)
+spew.Fdump(someWriter, myVar1, myVar2, ...)
+str := spew.Sdump(myVar1, myVar2, ...)
+```
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
+compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
+and pointer addresses):
+
+```Go
+spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+```
+
+## Debugging a Web Application Example
+
+Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
+
+```Go
+package main
+
+import (
+ "fmt"
+ "html"
+ "net/http"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
+ fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
+}
+
+func main() {
+ http.HandleFunc("/", handler)
+ http.ListenAndServe(":8080", nil)
+}
+```
+
+## Sample Dump Output
+
+```
+(main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) {
+ (string) "one": (bool) true
+ }
+}
+([]uint8) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+}
+```
+
+## Sample Formatter Output
+
+Double pointer to a uint8:
+```
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+```
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+```
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+```
+
+## Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available via the
+spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+```
+* Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+* MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+* DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+* DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables. This option
+ relies on access to the unsafe package, so it will not have any effect when
+ running in environments without access to the unsafe package such as Google
+ App Engine or with the "safe" build tag specified.
+ Pointer method invocation is enabled by default.
+
+* DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+* DisableCapacities
+ DisableCapacities specifies whether to disable the printing of capacities
+ for arrays, slices, maps and channels. This is useful when diffing data
+ structures in tests.
+
+* ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+* SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are supported,
+ with other types sorted according to the reflect.Value.String() output
+ which guarantees display stability. Natural map order is used by
+ default.
+
+* SpewKeys
+ SpewKeys specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only considered
+ if SortKeys is true.
+
+```
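+
+For example, a sketch of a per-instance configuration with a few of these
+options set (the variable names are illustrative):
+
+```Go
+cs := spew.ConfigState{Indent: "\t", MaxDepth: 2, SortKeys: true}
+cs.Dump(myVar1)
+str := cs.Sdump(myVar1)
+```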
+
+## Unsafe Package Dependency
+
+This package relies on the unsafe package to perform some of the more advanced
+features, however it also supports a "limited" mode which allows it to work in
+environments where the unsafe package is not available. By default, it will
+operate in this mode on Google App Engine and when compiled with GopherJS. The
+"safe" build tag may also be specified to force the package to build without
+using the unsafe package.
+
+## License
+
+Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 000000000..8a4a6589a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+ // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+ // internal reflect.Value fields. These values are valid before golang
+ // commit ecccf07e7f9d which changed the format. The are also valid
+	// commit ecccf07e7f9d which changed the format. They are also valid
+ // the original format. Code in the init function updates these offsets
+ // as necessary.
+ offsetPtr = uintptr(ptrSize)
+ offsetScalar = uintptr(0)
+ offsetFlag = uintptr(ptrSize * 2)
+
+ // flagKindWidth and flagKindShift indicate various bits that the
+ // reflect package uses internally to track kind information.
+ //
+ // flagRO indicates whether or not the value field of a reflect.Value is
+ // read-only.
+ //
+ // flagIndir indicates whether the value field of a reflect.Value is
+ // the actual data or a pointer to the data.
+ //
+ // These values are valid before golang commit 90a7c3c86944 which
+ // changed their positions. Code in the init function updates these
+ // flags as necessary.
+ flagKindWidth = uintptr(5)
+ flagKindShift = uintptr(flagKindWidth - 1)
+ flagRO = uintptr(1 << 0)
+ flagIndir = uintptr(1 << 1)
+)
+
+func init() {
+ // Older versions of reflect.Value stored small integers directly in the
+ // ptr field (which is named val in the older versions). Versions
+ // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+ // scalar for this purpose which unfortunately came before the flag
+ // field, so the offset of the flag field is different for those
+ // versions.
+ //
+ // This code constructs a new reflect.Value from a known small integer
+ // and checks if the size of the reflect.Value struct indicates it has
+ // the scalar field. When it does, the offsets are updated accordingly.
+ vv := reflect.ValueOf(0xf00)
+ if unsafe.Sizeof(vv) == (ptrSize * 4) {
+ offsetScalar = ptrSize * 2
+ offsetFlag = ptrSize * 3
+ }
+
+ // Commit 90a7c3c86944 changed the flag positions such that the low
+ // order bits are the kind. This code extracts the kind from the flags
+ // field and ensures it's the correct type. When it's not, the flag
+ // order has been changed to the newer format, so the flags are updated
+ // accordingly.
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+ upfv := *(*uintptr)(upf)
+ flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+ if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+ flagKindShift = 0
+ flagRO = 1 << 5
+ flagIndir = 1 << 6
+
+ // Commit adf9b30e5594 modified the flags to separate the
+ // flagRO flag into two bits which specifies whether or not the
+ // field is embedded. This causes flagIndir to move over a bit
+ // and means that flagRO is the combination of either of the
+ // original flagRO bit and the new bit.
+ //
+ // This code detects the change by extracting what used to be
+ // the indirect bit to ensure it's set. When it's not, the flag
+ // order has been changed to the newer format, so the flags are
+ // updated accordingly.
+ if upfv&flagIndir == 0 {
+ flagRO = 3 << 5
+ flagIndir = 1 << 7
+ }
+ }
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+ indirects := 1
+ vt := v.Type()
+ upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+ rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+ if rvf&flagIndir != 0 {
+ vt = reflect.PtrTo(v.Type())
+ indirects++
+ } else if offsetScalar != 0 {
+ // The value is in the scalar field when it's not one of the
+ // reference types.
+ switch vt.Kind() {
+ case reflect.Uintptr:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.UnsafePointer:
+ default:
+ upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+ offsetScalar)
+ }
+ }
+
+ pv := reflect.NewAt(vt, upv)
+ rv = pv
+ for i := 0; i < indirects; i++ {
+ rv = rv.Elem()
+ }
+ return rv
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 000000000..1fe3cf3d5
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 000000000..7c519ff47
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+	strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 000000000..2e3d22f31
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
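+// As a brief, hypothetical sketch (variable names are illustrative), a local
+// ConfigState gives deterministic, diff-friendly test output without
+// mutating the global Config:
+//
+//	cfg := spew.ConfigState{
+//		Indent:                  "\t",
+//		SortKeys:                true,
+//		DisablePointerAddresses: true,
+//		DisableCapacities:       true,
+//	}
+//	cfg.Dump(myVar)       // dump to standard out via this config
+//	s := cfg.Sdump(myVar) // the same output as a string
+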
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// DisablePointerAddresses: false
+// DisableCapacities: false
+// ContinueOnMethod: false
+// SortKeys: false
+// SpewKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
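+
+// As a brief, hypothetical follow-on (the variable names are illustrative),
+// the returned instance can be tuned before use without touching the global
+// Config:
+//
+//	cfg := spew.NewDefaultConfig()
+//	cfg.MaxDepth = 2
+//	cfg.Dump(deeplyNested)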
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 000000000..aacaac6f1
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows multiple configurations
+to be used concurrently. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
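+As a hypothetical example, a test helper might tune the global config once
+for deterministic output:
+
+ spew.Config.Indent = "\t"
+ spew.Config.SortKeys = true
+ spew.Dump(myVar1, myVar2)
+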
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 000000000..df1d582a7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || (!d.cs.DisableCapacities && valueCap != 0) {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 000000000..c49875bac
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 000000000..32c0e3388
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.
+// It returns the number of bytes written and any write error encountered.
+// See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 000000000..b15dc8d76
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 000000000..fc23d7ccd
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,85 @@
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+
+**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
+
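+In the meantime, a minimal sketch (the secret and variable names are
+placeholders) of a parse callback that rejects unexpected algorithms:
+
+```go
+token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+	// Only accept the HMAC family this service actually signs with.
+	if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+		return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+	}
+	return []byte("my-secret"), nil
+})
+```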
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method and which key were used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_Parse_hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_New_hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
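+As a rough, hypothetical sketch of the shape involved (deliberately not a
+useful method), a custom implementation satisfies `SigningMethod` and
+registers itself:
+
+```go
+type SigningMethodNoop struct{}
+
+func (m *SigningMethodNoop) Alg() string { return "NOOP" }
+
+// Sign would produce a real signature here.
+func (m *SigningMethodNoop) Sign(signingString string, key interface{}) (string, error) {
+	return "", nil
+}
+
+// Verify would check the signature here.
+func (m *SigningMethodNoop) Verify(signingString, signature string, key interface{}) error {
+	return nil
+}
+
+func init() {
+	jwt.RegisterSigningMethod("NOOP", func() jwt.SigningMethod {
+		return &SigningMethodNoop{}
+	})
+}
+```
+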
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package import instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing with respect to semantic versioning.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster to compute, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
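+For illustration (the key and claim values are placeholders), signing with
+the symmetric HS256 method looks roughly like this:
+
+```go
+token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.StandardClaims{
+	ExpiresAt: time.Now().Add(time.Hour).Unix(),
+	Issuer:    "example",
+})
+signed, err := token.SignedString([]byte("my-secret")) // the same secret verifies it later
+```
+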
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing, as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
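+
+For orientation, here's a minimal sketch of creating and parsing an HMAC-signed token (the secret is a placeholder):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jwt "github.com/dgrijalva/jwt-go"
+)
+
+func main() {
+	secret := []byte("my-secret") // placeholder: any []byte works for HMAC
+
+	// Create and sign a token.
+	tokenString, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "1234"}).SignedString(secret)
+	if err != nil {
+		panic(err)
+	}
+
+	// Parse and validate it; the Keyfunc returns the verification key.
+	token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+		// Guard against algorithm substitution by checking the method type.
+		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+		}
+		return secret, nil
+	})
+	fmt.Println(token.Valid, err)
+}
+```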
diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go
new file mode 100644
index 000000000..f0228f02e
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/claims.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// For a type to be a Claims object, it need only have a Valid method that
+// determines if the token is invalid for any supported reason
+type Claims interface {
+ Valid() error
+}
+
+// Structured version of Claims Section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
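+//
+// For illustration, a consuming package might embed StandardClaims in its own
+// claim type (MyClaims and Scope are hypothetical):
+//
+//	type MyClaims struct {
+//		jwt.StandardClaims
+//		Scope string `json:"scope,omitempty"`
+//	}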
+type StandardClaims struct {
+ Audience string `json:"aud,omitempty"`
+ ExpiresAt int64 `json:"exp,omitempty"`
+ Id string `json:"jti,omitempty"`
+ IssuedAt int64 `json:"iat,omitempty"`
+ Issuer string `json:"iss,omitempty"`
+ NotBefore int64 `json:"nbf,omitempty"`
+ Subject string `json:"sub,omitempty"`
+}
+
+// Validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, the token will
+// still be considered valid.
+func (c StandardClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ // The claims below are optional, by default, so if they are set to the
+ // default value in Go, let's not fail the verification for them.
+ if !c.VerifyExpiresAt(now, false) {
+ delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+ vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !c.VerifyIssuedAt(now, false) {
+ vErr.Inner = fmt.Errorf("token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !c.VerifyNotBefore(now, false) {
+ vErr.Inner = fmt.Errorf("token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+ return verifyAud(c.Audience, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ return verifyExp(c.ExpiresAt, cmp, req)
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ return verifyIat(c.IssuedAt, cmp, req)
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+ return verifyIss(c.Issuer, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud string, cmp string, required bool) bool {
+ if aud == "" {
+ return !required
+ }
+ return subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+ if exp == 0 {
+ return !required
+ }
+ return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+ if iat == 0 {
+ return !required
+ }
+ return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+ if iss == "" {
+ return !required
+ }
+ return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+ if nbf == 0 {
+ return !required
+ }
+ return now >= nbf
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go
new file mode 100644
index 000000000..a86dc1a3b
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
new file mode 100644
index 000000000..2f59a2223
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -0,0 +1,147 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *ecdsa.PublicKey
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return ErrInvalidKeyType
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s) {
+ return nil
+ }
+ return ErrECDSAVerification
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *ecdsa.PrivateKey
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return "", ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return EncodeSegment(out), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
new file mode 100644
index 000000000..d19624b72
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
@@ -0,0 +1,67 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+)
+
+// Parse PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKIX public key or certificate containing an ECDSA public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go
new file mode 100644
index 000000000..662df19d4
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/errors.go
@@ -0,0 +1,63 @@
+package jwt
+
+import (
+ "errors"
+)
+
+// Error constants
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+ ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
+ ValidationErrorUnverifiable // Token could not be verified because of signing problems
+ ValidationErrorSignatureInvalid // Signature validation failed
+
+ // Standard Claim validation errors
+ ValidationErrorAudience // AUD validation failed
+ ValidationErrorExpired // EXP validation failed
+ ValidationErrorIssuedAt // IAT validation failed
+ ValidationErrorIssuer // ISS validation failed
+ ValidationErrorNotValidYet // NBF validation failed
+ ValidationErrorId // JTI validation failed
+ ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// Helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+ return &ValidationError{
+ text: errorText,
+ Errors: errorFlags,
+ }
+}
+
+// The error from Parse if token is not valid
+type ValidationError struct {
+ Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
+ Errors uint32 // bitfield. see ValidationError... constants
+ text string // errors that do not have a valid error just have text
+}
+
+// Error satisfies the error interface
+func (e ValidationError) Error() string {
+ if e.Inner != nil {
+ return e.Inner.Error()
+ } else if e.text != "" {
+ return e.text
+ }
+ return "token is invalid"
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+ return e.Errors == 0
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go
new file mode 100644
index 000000000..c22991925
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// Implements the HMAC-SHA family of signing methods
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Decode signature, for comparison
+ sig, err := DecodeSegment(signature)
+ if err != nil {
+ return err
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return EncodeSegment(hasher.Sum(nil)), nil
+ }
+
+ return "", ErrInvalidKey
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
new file mode 100644
index 000000000..291213c46
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+ "encoding/json"
+ "errors"
+ // "fmt"
+)
+
+// Claims type that uses the map[string]interface{} for JSON decoding
+// This is the default claims type if you don't supply one
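+//
+// For illustration, claims can be built directly as a literal from a
+// consuming package (the field values here are placeholders):
+//
+//	claims := jwt.MapClaims{"sub": "1234", "exp": time.Now().Add(time.Hour).Unix()}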
+type MapClaims map[string]interface{}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+ aud, _ := m["aud"].(string)
+ return verifyAud(aud, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ switch exp := m["exp"].(type) {
+ case float64:
+ return verifyExp(int64(exp), cmp, req)
+ case json.Number:
+ v, _ := exp.Int64()
+ return verifyExp(v, cmp, req)
+ }
+ return !req
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ switch iat := m["iat"].(type) {
+ case float64:
+ return verifyIat(int64(iat), cmp, req)
+ case json.Number:
+ v, _ := iat.Int64()
+ return verifyIat(v, cmp, req)
+ }
+ return !req
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+ iss, _ := m["iss"].(string)
+ return verifyIss(iss, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ switch nbf := m["nbf"].(type) {
+ case float64:
+ return verifyNbf(int64(nbf), cmp, req)
+ case json.Number:
+ v, _ := nbf.Int64()
+ return verifyNbf(v, cmp, req)
+ }
+ return !req
+}
+
+// Validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, the token will
+// still be considered valid.
+func (m MapClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ if !m.VerifyExpiresAt(now, false) {
+ vErr.Inner = errors.New("token is expired")
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !m.VerifyIssuedAt(now, false) {
+ vErr.Inner = errors.New("token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !m.VerifyNotBefore(now, false) {
+ vErr.Inner = errors.New("token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go
new file mode 100644
index 000000000..f04d189d0
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/none.go
@@ -0,0 +1,52 @@
+package jwt
+
+// Implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if signature != "" {
+ return NewValidationError(
+ "'none' signing method with non-empty signature",
+ ValidationErrorSignatureInvalid,
+ )
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return "", nil
+ }
+ return "", NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go
new file mode 100644
index 000000000..7020c52a1
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/parser.go
@@ -0,0 +1,128 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type Parser struct {
+ ValidMethods []string // If populated, only these methods will be considered valid
+ UseJSONNumber bool // Use JSON Number format in JSON decoder
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ parts := strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+ }
+
+ var err error
+ token := &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+ if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+ return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+ }
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // parse Claims
+ var claimBytes []byte
+ token.Claims = claims
+
+ if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ if p.UseJSONNumber {
+ dec.UseNumber()
+ }
+ // JSON Decode. Special case for map type to avoid weird pointer behavior
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ // Handle decode error
+ if err != nil {
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+ }
+ } else {
+ return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+ }
+
+ // Verify signing method is in the required set
+ if p.ValidMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.ValidMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+ }
+ }
+
+ // Lookup key
+ var key interface{}
+ if keyFunc == nil {
+ // keyFunc was not provided. short circuiting validation
+ return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+ }
+ if key, err = keyFunc(token); err != nil {
+ // keyFunc returned an error
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+ }
+
+ vErr := &ValidationError{}
+
+ // Validate Claims
+ if err := token.Claims.Valid(); err != nil {
+
+ // If the Claims Valid returned an error, check if it is a validation error,
+ // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+ if e, ok := err.(*ValidationError); !ok {
+ vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+ } else {
+ vErr = e
+ }
+ }
+
+ // Perform validation
+ token.Signature = parts[2]
+ if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ vErr.Inner = err
+ vErr.Errors |= ValidationErrorSignatureInvalid
+ }
+
+ if vErr.valid() {
+ token.Valid = true
+ return token, nil
+ }
+
+ return token, vErr
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go
new file mode 100644
index 000000000..0ae0b1984
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go
@@ -0,0 +1,100 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// Implements the RSA family of signing methods
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this signing method, key must be an *rsa.PublicKey.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return "", ErrInvalidKey
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
new file mode 100644
index 000000000..10ee9db8a
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
@@ -0,0 +1,126 @@
+// +build go1.4
+
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// Implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ Hash: crypto.SHA256,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ Hash: crypto.SHA384,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ Hash: crypto.SHA512,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *rsa.PublicKey
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return ErrInvalidKey
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
new file mode 100644
index 000000000..213a90dbb
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
+ ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
+)
+
+// Parse PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKIX public key or certificate containing an RSA public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go
new file mode 100644
index 000000000..ed1f212b2
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go
@@ -0,0 +1,35 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// Implement SigningMethod to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+ Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+ Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// Register the "alg" name and a factory function for signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// Get a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go
new file mode 100644
index 000000000..d637e0867
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/token.go
@@ -0,0 +1,108 @@
+package jwt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "strings"
+ "time"
+)
+
+// TimeFunc provides the current time when parsing a token to validate the "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Parse methods use this callback function to supply
+// the key for verification. The function receives the parsed,
+// but unverified Token. This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
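+//
+// A minimal sketch of a Keyfunc that picks a key by the `kid` header
+// (lookupKey is a hypothetical helper, not part of this package):
+//
+//	keyFunc := func(t *jwt.Token) (interface{}, error) {
+//		if kid, ok := t.Header["kid"].(string); ok {
+//			return lookupKey(kid)
+//		}
+//		return nil, errors.New("token has no kid header")
+//	}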
+type Keyfunc func(*Token) (interface{}, error)
+
+// A JWT Token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+ Raw string // The raw token. Populated when you Parse a token
+ Method SigningMethod // The signing method used or to be used
+ Header map[string]interface{} // The first segment of the token
+ Claims Claims // The second segment of the token
+ Signature string // The third segment of the token. Populated when you Parse a token
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// Create a new Token. Takes a signing method
+func New(method SigningMethod) *Token {
+ return NewWithClaims(method, MapClaims{})
+}
+
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// Get the complete, signed token
+func (t *Token) SignedString(key interface{}) (string, error) {
+ var sig, sstr string
+ var err error
+ if sstr, err = t.SigningString(); err != nil {
+ return "", err
+ }
+ if sig, err = t.Method.Sign(sstr, key); err != nil {
+ return "", err
+ }
+ return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// Generate the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+ var err error
+ parts := make([]string, 2)
+ for i := range parts {
+ var jsonValue []byte
+ if i == 0 {
+ if jsonValue, err = json.Marshal(t.Header); err != nil {
+ return "", err
+ }
+ } else {
+ if jsonValue, err = json.Marshal(t.Claims); err != nil {
+ return "", err
+ }
+ }
+
+ parts[i] = EncodeSegment(jsonValue)
+ }
+ return strings.Join(parts, "."), nil
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).Parse(tokenString, keyFunc)
+}
+
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// Encode a segment using JWT specific base64url encoding with padding stripped
+func EncodeSegment(seg []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
+}
+
+// Decode a segment using JWT specific base64url encoding with padding stripped
+func DecodeSegment(seg string) ([]byte, error) {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+
+ return base64.URLEncoding.DecodeString(seg)
+}
diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE
new file mode 100644
index 000000000..e06d20818
--- /dev/null
+++ b/vendor/github.com/docker/distribution/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md
new file mode 100644
index 000000000..998878850
--- /dev/null
+++ b/vendor/github.com/docker/distribution/README.md
@@ -0,0 +1,130 @@
+# Distribution
+
+The Docker toolset to pack, ship, store, and deliver content.
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry)
+project with a new API design, focused around security and performance.
+
+<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
+
+[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
+[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
+
+This repository contains the following components:
+
+|**Component** |Description |
+|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
+| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
+| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of the V2 API for use in the [Docker
+core project](https://github.com/docker/docker). The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secured and reliable way to store, manage, package and
+ exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home-made solution through good specs and a solid
+ extension mechanism.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For these
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see [docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+<table>
+<tr>
+ <th align="left">
+ IRC
+ </th>
+ <td>
+ #docker-distribution on FreeNode
+ </td>
+</tr>
+<tr>
+ <th align="left">
+ Issue Tracker
+ </th>
+ <td>
+ github.com/docker/distribution/issues
+ </td>
+</tr>
+<tr>
+ <th align="left">
+ Google Groups
+ </th>
+ <td>
+ https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+ </td>
+</tr>
+<tr>
+ <th align="left">
+ Mailing List
+ </th>
+ <td>
+ docker@dockerproject.org
+ </td>
+</tr>
+</table>
+
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
new file mode 100644
index 000000000..01d309029
--- /dev/null
+++ b/vendor/github.com/docker/distribution/blobs.go
@@ -0,0 +1,257 @@
+package distribution
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrBlobExists returned when blob already exists
+ ErrBlobExists = errors.New("blob exists")
+
+ // ErrBlobDigestUnsupported when blob digest is an unsupported version.
+ ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+ // ErrBlobUnknown when blob is not found.
+ ErrBlobUnknown = errors.New("unknown blob")
+
+ // ErrBlobUploadUnknown returned when upload is not found.
+ ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+ // ErrBlobInvalidLength returned when the blob's length on commit does
+ // not match the descriptor or is an invalid value.
+ ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest returned when digest check fails.
+type ErrBlobInvalidDigest struct {
+ Digest digest.Digest
+ Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+ return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+ err.Digest, err.Reason)
+}
+
+// ErrBlobMounted returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+ From reference.Canonical
+ Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+ return fmt.Sprintf("blob mounted from: %v to: %v",
+ err.From, err.Descriptor)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+ // MediaType describe the type of the content. All text based formats are
+ // encoded as utf-8.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Size in bytes of content.
+ Size int64 `json:"size,omitempty"`
+
+ // Digest uniquely identifies the content. A byte stream can be verified
+ // against this digest.
+ Digest digest.Digest `json:"digest,omitempty"`
+
+ // URLs contains the source URLs of this content.
+ URLs []string `json:"urls,omitempty"`
+
+ // NOTE: Before adding a field here, please ensure that all
+ // other options have been exhausted. Much of the type relationships
+ // depend on the simplicity of this type.
+}
+
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+ return d
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+ // Stat provides metadata about a blob identified by the digest. If the
+ // blob is unknown to the describer, ErrBlobUnknown will be returned.
+ Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+ Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobEnumerator enables iterating over blobs from storage
+type BlobEnumerator interface {
+ Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+ BlobStatter
+
+ // SetDescriptor assigns the descriptor to the digest. The provided digest
+ // and the digest in the descriptor must map to identical content, but they
+ // may differ in their algorithm. The descriptor must have the canonical
+ // digest of the content, and the digest algorithm must match the
+ // annotator's canonical algorithm.
+ //
+ // Such a facility can be used to map blobs between digest domains, with
+ // the restriction that the algorithm of the descriptor must match the
+ // canonical algorithm (i.e. sha256) of the annotator.
+ SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+ // Clear enables descriptors to be unlinked
+ Clear(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
+type BlobDescriptorServiceFactory interface {
+ BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+ io.ReadSeeker
+ io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+ // Get returns the entire blob identified by digest.
+ Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+ // Open provides a ReadSeekCloser to the blob identified by the provided
+ // descriptor. If the blob is not known to the service, an error will be
+ // returned.
+ Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+ // ServeBlob attempts to serve the blob, identified by dgst, via http. The
+ // service may decide to redirect the client elsewhere or serve the data
+ // directly.
+ //
+ // This handler only issues successful responses, such as 2xx or 3xx,
+ // meaning it serves data or issues a redirect. If the blob is not
+ // available, an error will be returned and the caller may still issue a
+ // response.
+ //
+ // The implementation may serve the same blob from a different digest
+ // domain. The appropriate headers will be set for the blob, unless they
+ // have already been set by the caller.
+ ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+ // Put inserts the content p into the blob service, returning a descriptor
+ // or an error.
+ Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+ // Create allocates a new blob writer to add a blob to this service. The
+ // returned handle can be written to and later resumed using an opaque
+ // identifier. With this approach, one can Close and Resume a BlobWriter
+ // multiple times until the BlobWriter is committed or cancelled.
+ Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
+
+ // Resume attempts to resume a write to a blob, identified by an id.
+ Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+ Apply(interface{}) error
+}
+
+// CreateOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type CreateOptions struct {
+ Mount struct {
+ ShouldMount bool
+ From reference.Canonical
+ // Stat allows passing a precalculated descriptor to link and return.
+ // The blob access check will be skipped if it is set.
+ Stat *Descriptor
+ }
+}
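+
+// Illustrative sketch (hypothetical names, not necessarily the upstream
+// option set): a BlobCreateOption that requests a cross-repository mount
+// by mutating CreateOptions through the Apply contract.
+type optionFunc func(interface{}) error
+
+// Apply calls f on the provided options value.
+func (f optionFunc) Apply(v interface{}) error {
+ return f(v)
+}
+
+// WithMountFrom configures a create request to mount the blob from the
+// given canonical reference instead of uploading it.
+func WithMountFrom(ref reference.Canonical) BlobCreateOption {
+ return optionFunc(func(v interface{}) error {
+  opts, ok := v.(*CreateOptions)
+  if !ok {
+   return fmt.Errorf("unexpected options type: %T", v)
+  }
+  opts.Mount.ShouldMount = true
+  opts.Mount.From = ref
+  return nil
+ })
+}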
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+ io.WriteCloser
+ io.ReaderFrom
+
+ // Size returns the number of bytes written to this blob.
+ Size() int64
+
+ // ID returns the identifier for this writer. The ID can be used with the
+ // Blob service to later resume the write.
+ ID() string
+
+ // StartedAt returns the time this blob write was started.
+ StartedAt() time.Time
+
+ // Commit completes the blob writer process. The content is verified
+ // against the provided provisional descriptor, which may result in an
+ // error. Depending on the implementation, written data may be validated
+ // against the provisional descriptor fields. If MediaType is not present,
+ // the implementation may reject the commit or assign
+ // "application/octet-stream" to the blob. The returned descriptor may
+ // have a different digest depending on the blob store, referred to as
+ // the canonical descriptor.
+ Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+ // Cancel ends the blob write without storing any data and frees any
+ // associated resources. Any data written thus far will be lost. Cancel
+ // implementations should allow multiple calls even after a commit that
+ // result in a no-op. This allows use of Cancel in a defer statement,
+ // increasing the assurance that it is correctly called.
+ Cancel(ctx context.Context) error
+}
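+
+// exampleIngest is an illustrative sketch of the ingest flow described
+// above; the function name and arguments are hypothetical.
+func exampleIngest(ctx context.Context, store BlobIngester, content io.Reader, provisional Descriptor) (Descriptor, error) {
+ bw, err := store.Create(ctx)
+ if err != nil {
+  return Descriptor{}, err
+ }
+ // Cancel is a no-op after a successful Commit, so deferring it is safe.
+ defer bw.Cancel(ctx)
+
+ if _, err := io.Copy(bw, content); err != nil {
+  return Descriptor{}, err
+ }
+
+ // Commit verifies the written data against the provisional descriptor
+ // and returns the canonical descriptor on success.
+ return bw.Commit(ctx, provisional)
+}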
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+ BlobStatter
+ BlobProvider
+ BlobIngester
+}
+
+// BlobStore represents the entire suite of blob-related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+ BlobService
+ BlobServer
+ BlobDeleter
+}
diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go
new file mode 100644
index 000000000..23cbf5b54
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/context.go
@@ -0,0 +1,85 @@
+package context
+
+import (
+ "sync"
+
+ "github.com/docker/distribution/uuid"
+ "golang.org/x/net/context"
+)
+
+// Context is a copy of Context from the golang.org/x/net/context package.
+type Context interface {
+ context.Context
+}
+
+// instanceContext is a context that provides only an instance id. It is
+// provided as the main background context.
+type instanceContext struct {
+ Context
+ id string // id of context, logged as "instance.id"
+ once sync.Once // once protects generation of the id
+}
+
+func (ic *instanceContext) Value(key interface{}) interface{} {
+ if key == "instance.id" {
+ ic.once.Do(func() {
+ // We want to lazily initialize the UUID so that we don't
+ // call a random generator from the package initialization
+ // code. For various reasons, randomness may not be available:
+ // https://github.com/docker/distribution/issues/782
+ ic.id = uuid.Generate().String()
+ })
+ return ic.id
+ }
+
+ return ic.Context.Value(key)
+}
+
+var background = &instanceContext{
+ Context: context.Background(),
+}
+
+// Background returns a non-nil, empty Context. The background context
+// provides a single key, "instance.id", that is globally unique to the
+// process.
+func Background() Context {
+ return background
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val. Use context Values only for request-scoped data that transits processes
+// and APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
+
+// stringMapContext is a simple context implementation that checks a map for a
+// key, falling back to a parent if not present.
+type stringMapContext struct {
+ context.Context
+ m map[string]interface{}
+}
+
+// WithValues returns a context that proxies lookups through a map. Only
+// supports string keys.
+func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
+ mo := make(map[string]interface{}, len(m)) // make our own copy.
+ for k, v := range m {
+ mo[k] = v
+ }
+
+ return stringMapContext{
+ Context: ctx,
+ m: mo,
+ }
+}
+
+func (smc stringMapContext) Value(key interface{}) interface{} {
+ if ks, ok := key.(string); ok {
+ if v, ok := smc.m[ks]; ok {
+ return v
+ }
+ }
+
+ return smc.Context.Value(key)
+}
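+
+// exampleWithValues is an illustrative usage sketch; the key and value
+// are hypothetical.
+func exampleWithValues() {
+ ctx := WithValues(Background(), map[string]interface{}{
+  "environment": "production",
+ })
+ _ = ctx.Value("environment") // "production"
+}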
diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go
new file mode 100644
index 000000000..9b623074e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/doc.go
@@ -0,0 +1,89 @@
+// Package context provides several utilities for working with
+// golang.org/x/net/context in http requests. Primarily, the focus is on
+// logging relevant request information but this package is not limited to
+// that purpose.
+//
+// The easiest way to get started is to get the background context:
+//
+// ctx := context.Background()
+//
+// The returned context should be passed around your application and be the
+// root of all other context instances. If the application has a version, this
+// line should be called before anything else:
+//
+// ctx := context.WithVersion(context.Background(), version)
+//
+// The above will store the version in the context and will be available to
+// the logger.
+//
+// Logging
+//
+// The most useful aspect of this package is GetLogger. This function takes
+// any context.Context interface and returns the current logger from the
+// context. Canonical usage looks like this:
+//
+// GetLogger(ctx).Infof("something interesting happened")
+//
+// GetLogger also takes optional key arguments. The keys will be looked up in
+// the context and reported with the logger. The following example would
+// return a logger that prints the version with each log message:
+//
+// ctx := context.WithValue(context.Background(), "version", version)
+// GetLogger(ctx, "version").Infof("this log message has a version field")
+//
+// The above would print out a log message like this:
+//
+// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
+//
+// When used with WithLogger, we gain the ability to decorate the context with
+// loggers that have information from disparate parts of the call stack.
+// Following from the version example, we can build a new context with the
+// configured logger such that we always print the version field:
+//
+// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
+//
+// Since the logger has been pushed to the context, we can now get the version
+// field for free with our log messages. Future calls to GetLogger on the new
+// context will have the version field:
+//
+// GetLogger(ctx).Infof("this log message has a version field")
+//
+// This becomes more powerful when we start stacking loggers. Let's say we
+// have the version logger from above but also want a request id. Using the
+// context above, in our request scoped function, we place another logger in
+// the context:
+//
+// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
+// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
+//
+// When GetLogger is called on the new context, "http.request.id" will be
+// included as a logger field, along with the original "version" field:
+//
+// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
+//
+// Note that this only affects the new context; the previous context, with
+// the version field, can still be used independently. Put another way, the
+// new logger, added to the request context, is unique to that context and
+// can have request-scoped variables.
+//
+// HTTP Requests
+//
+// This package also contains several methods for working with http requests.
+// The concepts are very similar to those described above. We simply place the
+// request in the context using WithRequest. This makes the request variables
+// available. GetRequestLogger can then be called to get request specific
+// variables in a log line:
+//
+// ctx = WithRequest(ctx, req)
+// GetRequestLogger(ctx).Infof("request variables")
+//
+// Like above, if we want to include the request data in all log messages in
+// the context, we push the logger to a new context and use that one:
+//
+// ctx = WithLogger(ctx, GetRequestLogger(ctx))
+//
+// The concept is fairly powerful and ensures that calls throughout the stack
+// can be traced in log messages. Using the fields like "http.request.id", one
+// can analyze call flow for a particular request with a simple grep of the
+// logs.
+package context
diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go
new file mode 100644
index 000000000..7d65c8524
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/http.go
@@ -0,0 +1,366 @@
+package context
+
+import (
+ "errors"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/docker/distribution/uuid"
+ "github.com/gorilla/mux"
+ log "github.com/sirupsen/logrus"
+)
+
+// Common errors used with this package.
+var (
+ ErrNoRequestContext = errors.New("no http request in context")
+ ErrNoResponseWriterContext = errors.New("no http response in context")
+)
+
+func parseIP(ipStr string) net.IP {
+ ip := net.ParseIP(ipStr)
+ if ip == nil {
+ log.Warnf("invalid remote IP address: %q", ipStr)
+ }
+ return ip
+}
+
+// RemoteAddr extracts the remote address of the request, taking into
+// account proxy headers.
+func RemoteAddr(r *http.Request) string {
+ if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
+ proxies := strings.Split(prior, ",")
+ if len(proxies) > 0 {
+ remoteAddr := strings.Trim(proxies[0], " ")
+ if parseIP(remoteAddr) != nil {
+ return remoteAddr
+ }
+ }
+ }
+ // X-Real-Ip is less supported, but worth checking in the
+ // absence of X-Forwarded-For
+ if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
+ if parseIP(realIP) != nil {
+ return realIP
+ }
+ }
+
+ return r.RemoteAddr
+}
+
+// RemoteIP extracts the remote IP of the request, taking into
+// account proxy headers.
+func RemoteIP(r *http.Request) string {
+ addr := RemoteAddr(r)
+
+ // Try parsing it as "IP:port"
+ if ip, _, err := net.SplitHostPort(addr); err == nil {
+ return ip
+ }
+
+ return addr
+}
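+
+// exampleRemoteAddr is an illustrative sketch of the proxy-header
+// precedence implemented above; the addresses are hypothetical.
+func exampleRemoteAddr() {
+ r, _ := http.NewRequest("GET", "http://registry.example.com/v2/", nil)
+ r.RemoteAddr = "10.0.0.1:34567"
+ r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
+
+ _ = RemoteAddr(r) // "203.0.113.7", taken from X-Forwarded-For
+ _ = RemoteIP(r)   // "203.0.113.7"
+}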
+
+// WithRequest places the request on the context. The context of the request
+// is assigned a unique id, available at "http.request.id". The request itself
+// is available at "http.request". Other common attributes are available under
+// the prefix "http.request.". If a request is already present on the context,
+// this method will panic.
+func WithRequest(ctx Context, r *http.Request) Context {
+ if ctx.Value("http.request") != nil {
+ // NOTE(stevvooe): This needs to be considered a programming error. It
+ // is unlikely that we'd want to have more than one request in
+ // context.
+ panic("only one request per context")
+ }
+
+ return &httpRequestContext{
+ Context: ctx,
+ startedAt: time.Now(),
+ id: uuid.Generate().String(),
+ r: r,
+ }
+}
+
+// GetRequest returns the http request in the given context. Returns
+// ErrNoRequestContext if the context does not have an http request associated
+// with it.
+func GetRequest(ctx Context) (*http.Request, error) {
+ if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
+ return r, nil
+ }
+ return nil, ErrNoRequestContext
+}
+
+// GetRequestID attempts to resolve the current request id, if possible. An
+// empty string is returned if it is not available on the context.
+func GetRequestID(ctx Context) string {
+ return GetStringValue(ctx, "http.request.id")
+}
+
+// WithResponseWriter returns a new context and response writer that makes
+// interesting response statistics available within the context.
+func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
+ if closeNotifier, ok := w.(http.CloseNotifier); ok {
+ irwCN := &instrumentedResponseWriterCN{
+ instrumentedResponseWriter: instrumentedResponseWriter{
+ ResponseWriter: w,
+ Context: ctx,
+ },
+ CloseNotifier: closeNotifier,
+ }
+
+ return irwCN, irwCN
+ }
+
+ irw := instrumentedResponseWriter{
+ ResponseWriter: w,
+ Context: ctx,
+ }
+ return &irw, &irw
+}
+
+// GetResponseWriter returns the http.ResponseWriter from the provided
+// context. If not present, ErrNoResponseWriterContext is returned. The
+// returned instance provides instrumentation in the context.
+func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
+ v := ctx.Value("http.response")
+
+ rw, ok := v.(http.ResponseWriter)
+ if !ok || rw == nil {
+ return nil, ErrNoResponseWriterContext
+ }
+
+ return rw, nil
+}
+
+// getVarsFromRequest lets us change the request vars implementation for
+// testing and possible future changes.
+var getVarsFromRequest = mux.Vars
+
+// WithVars extracts gorilla/mux vars and makes them available on the returned
+// context. Variables are available at keys with the prefix "vars.". For
+// example, if looking for the variable "name", it can be accessed as
+// "vars.name". Implementations that are accessing values need not know that
+// the underlying context is implemented with gorilla/mux vars.
+func WithVars(ctx Context, r *http.Request) Context {
+ return &muxVarsContext{
+ Context: ctx,
+ vars: getVarsFromRequest(r),
+ }
+}
+
+// GetRequestLogger returns a logger that contains fields from the request in
+// the current context. If the request is not available in the context, no
+// fields will display. Request loggers can safely be pushed onto the context.
+func GetRequestLogger(ctx Context) Logger {
+ return GetLogger(ctx,
+ "http.request.id",
+ "http.request.method",
+ "http.request.host",
+ "http.request.uri",
+ "http.request.referer",
+ "http.request.useragent",
+ "http.request.remoteaddr",
+ "http.request.contenttype")
+}
+
+// GetResponseLogger reads the current response stats and builds a logger.
+// Because the values are read at call time, pushing a logger returned from
+// this function on the context will lead to missing or invalid data. Only
+// call this at the end of a request, after the response has been written.
+func GetResponseLogger(ctx Context) Logger {
+ l := getLogrusLogger(ctx,
+ "http.response.written",
+ "http.response.status",
+ "http.response.contenttype")
+
+ duration := Since(ctx, "http.request.startedat")
+
+ if duration > 0 {
+ l = l.WithField("http.response.duration", duration.String())
+ }
+
+ return l
+}
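+
+// exampleHandler is an illustrative sketch of how the request and
+// response helpers above are typically wired together in an
+// http.Handler; the handler itself is hypothetical.
+func exampleHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := WithRequest(Background(), r)
+ ctx, w = WithResponseWriter(ctx, w)
+ ctx = WithLogger(ctx, GetRequestLogger(ctx))
+
+ w.WriteHeader(http.StatusOK)
+
+ // Read response stats only after the response has been written.
+ GetResponseLogger(ctx).Infof("request served")
+}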
+
+// httpRequestContext makes information about a request available to context.
+type httpRequestContext struct {
+ Context
+
+ startedAt time.Time
+ id string
+ r *http.Request
+}
+
+// Value returns a keyed element of the request for use in the context. To get
+// the request itself, query "http.request". For other components, access them
+// as "http.request.<component>"; for example, "http.request.uri" returns
+// r.RequestURI.
+func (ctx *httpRequestContext) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "http.request" {
+ return ctx.r
+ }
+
+ if !strings.HasPrefix(keyStr, "http.request.") {
+ goto fallback
+ }
+
+ parts := strings.Split(keyStr, ".")
+
+ if len(parts) != 3 {
+ goto fallback
+ }
+
+ switch parts[2] {
+ case "uri":
+ return ctx.r.RequestURI
+ case "remoteaddr":
+ return RemoteAddr(ctx.r)
+ case "method":
+ return ctx.r.Method
+ case "host":
+ return ctx.r.Host
+ case "referer":
+ referer := ctx.r.Referer()
+ if referer != "" {
+ return referer
+ }
+ case "useragent":
+ return ctx.r.UserAgent()
+ case "id":
+ return ctx.id
+ case "startedat":
+ return ctx.startedAt
+ case "contenttype":
+ ct := ctx.r.Header.Get("Content-Type")
+ if ct != "" {
+ return ct
+ }
+ }
+ }
+
+fallback:
+ return ctx.Context.Value(key)
+}
+
+type muxVarsContext struct {
+ Context
+ vars map[string]string
+}
+
+func (ctx *muxVarsContext) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "vars" {
+ return ctx.vars
+ }
+
+ if strings.HasPrefix(keyStr, "vars.") {
+ keyStr = strings.TrimPrefix(keyStr, "vars.")
+ }
+
+ if v, ok := ctx.vars[keyStr]; ok {
+ return v
+ }
+ }
+
+ return ctx.Context.Value(key)
+}
+
+// instrumentedResponseWriterCN provides response writer information in a
+// context. It implements http.CloseNotifier so that users can detect
+// early disconnects.
+type instrumentedResponseWriterCN struct {
+ instrumentedResponseWriter
+ http.CloseNotifier
+}
+
+// instrumentedResponseWriter provides response writer information in a
+// context. This variant is only used in the case where CloseNotifier is not
+// implemented by the parent ResponseWriter.
+type instrumentedResponseWriter struct {
+ http.ResponseWriter
+ Context
+
+ mu sync.Mutex
+ status int
+ written int64
+}
+
+func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
+ n, err = irw.ResponseWriter.Write(p)
+
+ irw.mu.Lock()
+ irw.written += int64(n)
+
+ // Guess the likely status if not set.
+ if irw.status == 0 {
+ irw.status = http.StatusOK
+ }
+
+ irw.mu.Unlock()
+
+ return
+}
+
+func (irw *instrumentedResponseWriter) WriteHeader(status int) {
+ irw.ResponseWriter.WriteHeader(status)
+
+ irw.mu.Lock()
+ irw.status = status
+ irw.mu.Unlock()
+}
+
+func (irw *instrumentedResponseWriter) Flush() {
+ if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
+ flusher.Flush()
+ }
+}
+
+func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "http.response" {
+ return irw
+ }
+
+ if !strings.HasPrefix(keyStr, "http.response.") {
+ goto fallback
+ }
+
+ parts := strings.Split(keyStr, ".")
+
+ if len(parts) != 3 {
+ goto fallback
+ }
+
+ irw.mu.Lock()
+ defer irw.mu.Unlock()
+
+ switch parts[2] {
+ case "written":
+ return irw.written
+ case "status":
+ return irw.status
+ case "contenttype":
+ contentType := irw.Header().Get("Content-Type")
+ if contentType != "" {
+ return contentType
+ }
+ }
+ }
+
+fallback:
+ return irw.Context.Value(key)
+}
+
+func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "http.response" {
+ return irw
+ }
+ }
+
+ return irw.instrumentedResponseWriter.Value(key)
+}
diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go
new file mode 100644
index 000000000..86c5964e4
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/logger.go
@@ -0,0 +1,116 @@
+package context
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/sirupsen/logrus"
+)
+
+// Logger provides a leveled-logging interface.
+type Logger interface {
+ // standard logger methods
+ Print(args ...interface{})
+ Printf(format string, args ...interface{})
+ Println(args ...interface{})
+
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fatalln(args ...interface{})
+
+ Panic(args ...interface{})
+ Panicf(format string, args ...interface{})
+ Panicln(args ...interface{})
+
+ // Leveled methods, from logrus
+ Debug(args ...interface{})
+ Debugf(format string, args ...interface{})
+ Debugln(args ...interface{})
+
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Errorln(args ...interface{})
+
+ Info(args ...interface{})
+ Infof(format string, args ...interface{})
+ Infoln(args ...interface{})
+
+ Warn(args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warnln(args ...interface{})
+}
+
+// WithLogger creates a new context with the provided logger.
+func WithLogger(ctx Context, logger Logger) Context {
+ return WithValue(ctx, "logger", logger)
+}
+
+// GetLoggerWithField returns a logger instance with the specified field key
+// and value without affecting the context. Extra specified keys will be
+// resolved from the context.
+func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
+ return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
+}
+
+// GetLoggerWithFields returns a logger instance with the specified fields
+// without affecting the context. Extra specified keys will be resolved from
+// the context.
+func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
+ // must convert from interface{} -> interface{} to string -> interface{} for logrus.
+ lfields := make(logrus.Fields, len(fields))
+ for key, value := range fields {
+ lfields[fmt.Sprint(key)] = value
+ }
+
+ return getLogrusLogger(ctx, keys...).WithFields(lfields)
+}
+
+// GetLogger returns the logger from the current context, if present. If one
+// or more keys are provided, they will be resolved on the context and
+// included in the logger. While context.Value takes an interface, any key
+// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
+// a logging key field. If context keys are integer constants, for example,
+// it's recommended that a String method be implemented.
+func GetLogger(ctx Context, keys ...interface{}) Logger {
+ return getLogrusLogger(ctx, keys...)
+}
+
+// getLogrusLogger returns the logrus logger for the context. If one or more
+// keys are provided, they will be resolved on the context and included in the
+// logger. Only use this function if specific logrus functionality is
+// required.
+func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
+ var logger *logrus.Entry
+
+ // Get a logger, if it is present.
+ loggerInterface := ctx.Value("logger")
+ if loggerInterface != nil {
+ if lgr, ok := loggerInterface.(*logrus.Entry); ok {
+ logger = lgr
+ }
+ }
+
+ if logger == nil {
+ fields := logrus.Fields{}
+
+ // Fill in the instance id, if we have it.
+ instanceID := ctx.Value("instance.id")
+ if instanceID != nil {
+ fields["instance.id"] = instanceID
+ }
+
+ fields["go.version"] = runtime.Version()
+ // If no logger is found, just return the standard logger.
+ logger = logrus.StandardLogger().WithFields(fields)
+ }
+
+ fields := logrus.Fields{}
+ for _, key := range keys {
+ v := ctx.Value(key)
+ if v != nil {
+ fields[fmt.Sprint(key)] = v
+ }
+ }
+
+ return logger.WithFields(fields)
+}
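+
+// exampleLogging is an illustrative usage sketch; the field name is
+// hypothetical.
+func exampleLogging(ctx Context) {
+ log := GetLoggerWithFields(ctx, map[interface{}]interface{}{
+  "component": "registry",
+ })
+ log.Debugf("component initialized")
+}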
diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go
new file mode 100644
index 000000000..721964a84
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/trace.go
@@ -0,0 +1,104 @@
+package context
+
+import (
+ "runtime"
+ "time"
+
+ "github.com/docker/distribution/uuid"
+)
+
+// WithTrace allocates a traced timing span in a new context. This allows a
+// caller to track the time between calling WithTrace and the returned done
+// function. When the done function is called, a log message is emitted with a
+// "trace.duration" field, corresponding to the elapsed time and a
+// "trace.func" field, corresponding to the function that called WithTrace.
+//
+// The logging keys "trace.id" and "trace.parent.id" are provided to implement
+// dapper-like tracing. This function should be complemented with a WithSpan
+// method that could be used for tracing distributed RPC calls.
+//
+// The main benefit of this function is to post-process log messages or
+// intercept them in a hook to provide timing data. Trace ids and parent ids
+// can also be linked to provide call tracing, if so required.
+//
+// Here is an example of the usage:
+//
+// func timedOperation(ctx Context) {
+// ctx, done := WithTrace(ctx)
+// defer done("this will be the log message")
+// // ... function body ...
+// }
+//
+// If the function ran for roughly 1s, such a usage would emit a log message
+// as follows:
+//
+// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.timedOperation trace.id=<id> ...
+//
+// Notice that the function name is automatically resolved along with the
+// package, and a trace id is emitted that can be linked with parent ids.
+func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
+ if ctx == nil {
+ ctx = Background()
+ }
+
+ pc, file, line, _ := runtime.Caller(1)
+ f := runtime.FuncForPC(pc)
+ ctx = &traced{
+ Context: ctx,
+ id: uuid.Generate().String(),
+ start: time.Now(),
+ parent: GetStringValue(ctx, "trace.id"),
+ fnname: f.Name(),
+ file: file,
+ line: line,
+ }
+
+ return ctx, func(format string, a ...interface{}) {
+ GetLogger(ctx,
+ "trace.duration",
+ "trace.id",
+ "trace.parent.id",
+ "trace.func",
+ "trace.file",
+ "trace.line").
+ Debugf(format, a...)
+ }
+}
+
+// traced represents a context that is traced for function call timing. It
+// also provides fast lookup for the various attributes that are available on
+// the trace.
+type traced struct {
+ Context
+ id string
+ parent string
+ start time.Time
+ fnname string
+ file string
+ line int
+}
+
+func (ts *traced) Value(key interface{}) interface{} {
+ switch key {
+ case "trace.start":
+ return ts.start
+ case "trace.duration":
+ return time.Since(ts.start)
+ case "trace.id":
+ return ts.id
+ case "trace.parent.id":
+ if ts.parent == "" {
+ return nil // must return nil to signal no parent.
+ }
+
+ return ts.parent
+ case "trace.func":
+ return ts.fnname
+ case "trace.file":
+ return ts.file
+ case "trace.line":
+ return ts.line
+ }
+
+ return ts.Context.Value(key)
+}
diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go
new file mode 100644
index 000000000..cb9ef52e3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/util.go
@@ -0,0 +1,24 @@
+package context
+
+import (
+ "time"
+)
+
+// Since looks up key, which should be a time.Time, and returns the duration
+// since that time. If the key is not found, the value returned will be zero.
+// This is helpful when inferring metrics related to context execution times.
+func Since(ctx Context, key interface{}) time.Duration {
+ if startedAt, ok := ctx.Value(key).(time.Time); ok {
+ return time.Since(startedAt)
+ }
+ return 0
+}
+
+// GetStringValue returns a string value from the context. The empty string
+// will be returned if not found.
+func GetStringValue(ctx Context, key interface{}) (value string) {
+ if valuev, ok := ctx.Value(key).(string); ok {
+ value = valuev
+ }
+ return value
+}
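+
+// exampleSince is an illustrative sketch: store a start time on the
+// context and measure the elapsed duration later; the key is
+// hypothetical.
+func exampleSince(ctx Context) time.Duration {
+ ctx = WithValue(ctx, "operation.startedat", time.Now())
+ // ... perform the operation ...
+ return Since(ctx, "operation.startedat")
+}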
diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go
new file mode 100644
index 000000000..746cda02e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/version.go
@@ -0,0 +1,16 @@
+package context
+
+// WithVersion stores the application version in the context. The new context
+// gets a logger to ensure log messages are marked with the application
+// version.
+func WithVersion(ctx Context, version string) Context {
+ ctx = WithValue(ctx, "version", version)
+ // push a new logger onto the stack
+ return WithLogger(ctx, GetLogger(ctx, "version"))
+}
+
+// GetVersion returns the application version from the context. An empty
+// string may be returned if the version was not set on the context.
+func GetVersion(ctx Context) string {
+ return GetStringValue(ctx, "version")
+}
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
new file mode 100644
index 000000000..71327dca7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digestset/set.go
@@ -0,0 +1,247 @@
+package digestset
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrDigestNotFound is used when a matching digest
+ // could not be found in a set.
+ ErrDigestNotFound = errors.New("digest not found")
+
+ // ErrDigestAmbiguous is used when multiple digests
+ // are found in a set. None of the matching digests
+ // should be considered valid matches.
+ ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which may be easily
+// referenced by a string representation of the digest as well as a short
+// representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+ mutex sync.RWMutex
+ entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// to which digests may be added.
+func NewSet() *Set {
+ return &Set{
+ entries: digestEntries{},
+ }
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
+ if len(hex) == len(shortHex) {
+ if hex != shortHex {
+ return false
+ }
+ if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ } else if !strings.HasPrefix(hex, shortHex) {
+ return false
+ } else if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (digest.Digest, error) {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ if len(dst.entries) == 0 {
+ return "", ErrDigestNotFound
+ }
+ var (
+ searchFunc func(int) bool
+ alg digest.Algorithm
+ hex string
+ )
+ dgst, err := digest.Parse(d)
+ if err == digest.ErrDigestInvalidFormat {
+ hex = d
+ searchFunc = func(i int) bool {
+ return dst.entries[i].val >= d
+ }
+ } else {
+ hex = dgst.Hex()
+ alg = dgst.Algorithm()
+ searchFunc = func(i int) bool {
+ if dst.entries[i].val == hex {
+ return dst.entries[i].alg >= alg
+ }
+ return dst.entries[i].val >= hex
+ }
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+ return "", ErrDigestNotFound
+ }
+ if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+ return dst.entries[idx].digest, nil
+ }
+ if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+ return "", ErrDigestAmbiguous
+ }
+
+ return dst.entries[idx].digest, nil
+}
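+
+// exampleLookup is an illustrative usage sketch: add a digest and
+// resolve it by a short prefix; the digest value is hypothetical.
+func exampleLookup() {
+ d := digest.Digest("sha256:" + strings.Repeat("a", 64))
+ s := NewSet()
+ if err := s.Add(d); err != nil {
+  return
+ }
+ if got, err := s.Lookup("aaaa"); err == nil {
+  _ = got // resolves to d while the prefix remains unambiguous
+ }
+}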
+
+// Add adds the given digest to the set. An error will be returned
+// if the given digest is invalid. If the digest already exists in the
+// set, this operation will be a no-op.
+func (dst *Set) Add(d digest.Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) {
+ dst.entries = append(dst.entries, entry)
+ return nil
+ } else if dst.entries[idx].digest == d {
+ return nil
+ }
+
+ entries := append(dst.entries, nil)
+ copy(entries[idx+1:], entries[idx:len(entries)-1])
+ entries[idx] = entry
+ dst.entries = entries
+ return nil
+}
+
+// Remove removes the given digest from the set. An error will be
+// returned if the given digest is invalid. If the digest does
+// not exist in the set, this operation will be a no-op.
+func (dst *Set) Remove(d digest.Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ // Not found if idx is past the end or the value at idx is not the digest
+ if idx == len(dst.entries) || dst.entries[idx].digest != d {
+ return nil
+ }
+
+ entries := dst.entries
+ copy(entries[idx:], entries[idx+1:])
+ entries = entries[:len(entries)-1]
+ dst.entries = entries
+
+ return nil
+}
+
+// All returns all the digests in the set
+func (dst *Set) All() []digest.Digest {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ retValues := make([]digest.Digest, len(dst.entries))
+ for i := range dst.entries {
+ retValues[i] = dst.entries[i].digest
+ }
+
+ return retValues
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length parameter represents the minimum length; the maximum length may
+// be the entire digest value if uniqueness cannot be achieved without the
+// full value. This function will attempt to make short codes as short
+// as possible while remaining unique.
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ m := make(map[digest.Digest]string, len(dst.entries))
+ l := length
+ resetIdx := 0
+ for i := 0; i < len(dst.entries); i++ {
+ var short string
+ extended := true
+ for extended {
+ extended = false
+ if len(dst.entries[i].val) <= l {
+ short = dst.entries[i].digest.String()
+ } else {
+ short = dst.entries[i].val[:l]
+ for j := i + 1; j < len(dst.entries); j++ {
+ if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
+ if j > resetIdx {
+ resetIdx = j
+ }
+ extended = true
+ } else {
+ break
+ }
+ }
+ if extended {
+ l++
+ }
+ }
+ }
+ m[dst.entries[i].digest] = short
+ if i >= resetIdx {
+ l = length
+ }
+ }
+ return m
+}
+
+type digestEntry struct {
+ alg digest.Algorithm
+ val string
+ digest digest.Digest
+}
+
+type digestEntries []*digestEntry
+
+func (d digestEntries) Len() int {
+ return len(d)
+}
+
+func (d digestEntries) Less(i, j int) bool {
+ if d[i].val != d[j].val {
+ return d[i].val < d[j].val
+ }
+ return d[i].alg < d[j].alg
+}
+
+func (d digestEntries) Swap(i, j int) {
+ d[i], d[j] = d[j], d[i]
+}
diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go
new file mode 100644
index 000000000..bdd8cb708
--- /dev/null
+++ b/vendor/github.com/docker/distribution/doc.go
@@ -0,0 +1,7 @@
+// Package distribution will define the interfaces for the components of
+// docker distribution. The goal is to allow users to reliably package, ship
+// and store content related to docker images.
+//
+// This is currently a work in progress. More details are available in the
+// README.md.
+package distribution
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
new file mode 100644
index 000000000..020d33258
--- /dev/null
+++ b/vendor/github.com/docker/distribution/errors.go
@@ -0,0 +1,115 @@
+package distribution
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+// ErrAccessDenied is returned when an access to a requested resource is
+// denied.
+var ErrAccessDenied = errors.New("access denied")
+
+// ErrManifestNotModified is returned when a conditional manifest GetByTag
+// returns nil due to the client indicating it has the latest version.
+var ErrManifestNotModified = errors.New("manifest not modified")
+
+// ErrUnsupported is returned when an unimplemented or unsupported action is
+// performed.
+var ErrUnsupported = errors.New("operation unsupported")
+
+// ErrTagUnknown is returned if the given tag is not known by the tag service
+type ErrTagUnknown struct {
+ Tag string
+}
+
+func (err ErrTagUnknown) Error() string {
+ return fmt.Sprintf("unknown tag=%s", err.Tag)
+}
+
+// ErrRepositoryUnknown is returned if the named repository is not known by
+// the registry.
+type ErrRepositoryUnknown struct {
+ Name string
+}
+
+func (err ErrRepositoryUnknown) Error() string {
+ return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
+// ErrRepositoryNameInvalid should be used to denote an invalid repository
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrRepositoryNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrRepositoryNameInvalid) Error() string {
+ return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
+}
+
+// ErrManifestUnknown is returned if the manifest is not known by the
+// registry.
+type ErrManifestUnknown struct {
+ Name string
+ Tag string
+}
+
+func (err ErrManifestUnknown) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnknownRevision is returned when a manifest cannot be found by
+// revision within a repository.
+type ErrManifestUnknownRevision struct {
+ Name string
+ Revision digest.Digest
+}
+
+func (err ErrManifestUnknownRevision) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+ return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+ var parts []string
+ for _, err := range errs {
+ parts = append(parts, err.Error())
+ }
+
+ return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+ Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+ return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// ErrManifestNameInvalid should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrManifestNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrManifestNameInvalid) Error() string {
+ return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 000000000..2c99f25d3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+ "fmt"
+ "mime"
+
+ "github.com/docker/distribution/context"
+ "github.com/opencontainers/go-digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target
+type Manifest interface {
+ // References returns a list of objects which make up this manifest.
+ // A reference is anything which can be represented by a
+ // distribution.Descriptor. These can consist of layers, resources or other
+ // manifests.
+ //
+ // While no particular order is required, implementations should return
+ // them from highest to lowest priority. For example, one might want to
+ // return the base layer before the top layer.
+ References() []Descriptor
+
+ // Payload provides the serialized format of the manifest, in addition to
+ // the media type.
+ Payload() (mediaType string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+ // Build creates the manifest from this builder.
+ Build(ctx context.Context) (Manifest, error)
+
+ // References returns a list of objects which have been added to this
+ // builder. The dependencies are returned in the order they were added,
+ // which should be from base to head.
+ References() []Descriptor
+
+ // AppendReference includes the given object in the manifest after any
+ // existing dependencies. If the add fails, such as when adding an
+ // unsupported dependency, an error may be returned.
+ //
+ // The destination of the reference is dependent on the manifest type and
+ // the dependency type.
+ AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+ // Exists returns true if the manifest exists.
+ Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+ // Get retrieves the manifest specified by the given digest
+ Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+ // Put creates or updates the given manifest returning the manifest digest
+ Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+ // Delete removes the manifest specified by the given digest. Deleting
+ // a manifest that doesn't exist will return ErrManifestUnknown.
+ Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+ // Enumerate calls ingester for each manifest.
+ Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+ Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+ for t := range mappings {
+ if t != "" {
+ mediaTypes = append(mediaTypes, t)
+ }
+ }
+ return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType.
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+ // Need to look up by the actual media type, not the raw contents of
+ // the header. Strip semicolons and anything following them.
+ var mediaType string
+ if ctHeader != "" {
+ var err error
+ mediaType, _, err = mime.ParseMediaType(ctHeader)
+ if err != nil {
+ return nil, Descriptor{}, err
+ }
+ }
+
+ unmarshalFunc, ok := mappings[mediaType]
+ if !ok {
+ unmarshalFunc, ok = mappings[""]
+ if !ok {
+ return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+ }
+ }
+
+ return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
+// This should be called from specific manifest packages.
+func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
+ if _, ok := mappings[mediaType]; ok {
+ return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
+ }
+ mappings[mediaType] = u
+ return nil
+}
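+
+// exampleRegister is an illustrative sketch: a version-specific manifest
+// package would typically register its unmarshal function at init time.
+// The media type and function body are hypothetical.
+func exampleRegister() {
+ mediaType := "application/vnd.example.manifest.v1+json"
+ err := RegisterManifestSchema(mediaType, func(b []byte) (Manifest, Descriptor, error) {
+  // Decode b into a concrete Manifest implementation here.
+  return nil, Descriptor{}, fmt.Errorf("example unmarshaler not implemented")
+ })
+ if err != nil {
+  // The media type was already registered.
+  panic(err)
+ }
+}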
diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go
new file mode 100644
index 000000000..978df7eab
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
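+
+// exampleMatch is an illustrative usage sketch, assuming the reference
+// parses successfully.
+func exampleMatch() bool {
+ ref, err := ParseNormalizedNamed("redis:latest")
+ if err != nil {
+  return false
+ }
+ // Matches via the familiar string "redis:latest".
+ ok, _ := FamiliarMatch("redis*", ref)
+ return ok
+}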
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
new file mode 100644
index 000000000..2d71fc5e9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/normalize.go
@@ -0,0 +1,170 @@
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/docker/distribution/digestset"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ legacyDefaultDomain = "index.docker.io"
+ defaultDomain = "docker.io"
+ officialRepoName = "library"
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference,
+// transforming a familiar name from the Docker UI into a fully
+// qualified reference. If the value may be an identifier,
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
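+
+// exampleNormalize is an illustrative sketch of the normalization
+// performed above; the resulting fully qualified names are shown in the
+// comments.
+func exampleNormalize() {
+ n1, _ := ParseNormalizedNamed("ubuntu")             // docker.io/library/ubuntu
+ n2, _ := ParseNormalizedNamed("dmcgowan/myapp")     // docker.io/dmcgowan/myapp
+ n3, _ := ParseNormalizedNamed("localhost:5000/foo") // localhost:5000/foo
+ _, _, _ = n1, n2, n3
+}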
+
+// splitDockerDomain splits a repository name into domain and remote-name
+// strings. If no valid domain is found, the default domain is used. The
+// repository name must already be validated.
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized name-only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/<official repo name>"
+ if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+ repo.path = split[1]
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // The default tag must be valid; to create a NamedTagged
+ // type with non-validated input, the WithTag function
+ // should be used instead.
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
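+
+// exampleTagNameOnly is an illustrative sketch: only bare names receive
+// the default tag.
+func exampleTagNameOnly() {
+ ref, err := ParseNormalizedNamed("alpine")
+ if err != nil {
+  return
+ }
+ _ = TagNameOnly(ref).String() // "docker.io/library/alpine:latest"
+}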
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
+
+// ParseAnyReferenceWithSet parses a reference string as a possible short
+// identifier to be matched in a digest set, a full digest, or familiar name.
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
+ if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
+ dgst, err := ds.Lookup(ref)
+ if err == nil {
+ return digestReference(dgst), nil
+ }
+ } else {
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+ }
+
+ return ParseNormalizedNamed(ref)
+}
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 000000000..2f66cca87
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] path-component ['/' path-component]*
+// domain := domain-component ['.' domain-component]* [':' port-number]
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+// short-identifier := /[a-f0-9]{6,64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object that has a digest
+// by which it can be referenced.
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including a domain-qualified name and a digest.
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the Named reference
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the Named reference
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+// DEPRECATED: Use Domain or Path
+func SplitHostname(named Named) (string, string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain(), r.Path()
+ }
+ return splitDomain(named.Name())
+}
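+
+// A minimal sketch (assuming the package is imported as "reference"):
+//
+//	named, _ := reference.ParseNamed("registry.example.com:5000/foo/bar:v1")
+//	reference.Domain(named) // "registry.example.com:5000"
+//	reference.Path(named)   // "foo/bar"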
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+ return nil, ErrNameContainsUppercase
+ }
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ var repo repository
+
+ nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+ if nameMatch != nil && len(nameMatch) == 3 {
+ repo.domain = nameMatch[1]
+ repo.path = nameMatch[2]
+ } else {
+ repo.domain = ""
+ repo.path = matches[1]
+ }
+
+ ref := reference{
+ namedRepository: repo,
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.Parse(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
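+
+// A minimal sketch of the concrete interfaces Parse can yield,
+// depending on which components are present (assuming the package is
+// imported as "reference"; the digest shown is a placeholder):
+//
+//	r, _ := reference.Parse("foo/bar")           // Named
+//	r, _ = reference.Parse("foo/bar:v1")         // Named + Tagged
+//	r, _ = reference.Parse("foo/bar@sha256:...") // Named + Digested (Canonical)
+//	_ = r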
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+ named, err := ParseNormalizedNamed(s)
+ if err != nil {
+ return nil, err
+ }
+ if named.String() != s {
+ return nil, ErrNameNotCanonical
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository{
+ domain: match[1],
+ path: match[2],
+ }, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if canonical, ok := name.(Canonical); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tag,
+ digest: canonical.Digest(),
+ }, nil
+ }
+ return taggedReference{
+ namedRepository: repo,
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if tagged, ok := name.(Tagged); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tagged.Tag(),
+ digest: digest,
+ }, nil
+ }
+ return canonicalReference{
+ namedRepository: repo,
+ digest: digest,
+ }, nil
+}
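+
+// A minimal sketch composing WithName, WithTag and WithDigest (assuming
+// the package is imported as "reference" and dgst holds a valid
+// digest.Digest):
+//
+//	named, _ := reference.WithName("docker.io/library/ubuntu")
+//	tagged, _ := reference.WithTag(named, "16.04")
+//	canonical, _ := reference.WithDigest(tagged, dgst)
+//	// canonical.String() == "docker.io/library/ubuntu:16.04@" + dgst.String()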
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+ domain, path := SplitHostname(ref)
+ return repository{
+ domain: domain,
+ path: path,
+ }
+}
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.Name() == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ namedRepository: ref.namedRepository,
+ digest: ref.digest,
+ }
+ }
+ return ref.namedRepository
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ namedRepository: ref.namedRepository,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ namedRepository
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository struct {
+ domain string
+ path string
+}
+
+func (r repository) String() string {
+ return r.Name()
+}
+
+func (r repository) Name() string {
+ if r.domain == "" {
+ return r.path
+ }
+ return r.domain + "/" + r.path
+}
+
+func (r repository) Domain() string {
+ return r.domain
+}
+
+func (r repository) Path() string {
+ return r.path
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ namedRepository
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ namedRepository
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go
new file mode 100644
index 000000000..786034932
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/regexp.go
@@ -0,0 +1,143 @@
+package reference
+
+import "regexp"
+
+var (
+ // alphaNumericRegexp defines the alpha numeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphaNumericRegexp = match(`[a-z0-9]+`)
+
+ // separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and
+	// multiple dashes.
+ separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+ // nameComponentRegexp restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+ nameComponentRegexp = expression(
+ alphaNumericRegexp,
+ optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+ // domainComponentRegexp restricts the registry domain component of a
+ // repository name to start with a component as defined by DomainRegexp
+ // and followed by an optional port.
+ domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+ // DomainRegexp defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ DomainRegexp = expression(
+ domainComponentRegexp,
+ optional(repeated(literal(`.`), domainComponentRegexp)),
+ optional(literal(`:`), match(`[0-9]+`)))
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = anchored(TagRegexp)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = anchored(DigestRegexp)
+
+ // NameRegexp is the format for the name component of references. The
+ // regexp has capturing groups for the domain and name part omitting
+ // the separating forward slash from either.
+ NameRegexp = expression(
+ optional(DomainRegexp, literal(`/`)),
+ nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp)))
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // domain and trailing components.
+ anchoredNameRegexp = anchored(
+ optional(capture(DomainRegexp), literal(`/`)),
+ capture(nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp))))
+
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = anchored(capture(NameRegexp),
+ optional(literal(":"), capture(TagRegexp)),
+ optional(literal("@"), capture(DigestRegexp)))
+
+ // IdentifierRegexp is the format for string identifier used as a
+ // content addressable identifier using sha256. These identifiers
+ // are like digests without the algorithm, since sha256 is used.
+ IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+ // ShortIdentifierRegexp is the format used to represent a prefix
+ // of an identifier. A prefix may be used to match a sha256 identifier
+ // within a list of trusted identifiers.
+ ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+ // anchoredIdentifierRegexp is used to check or match an
+ // identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp)
+
+ // anchoredShortIdentifierRegexp is used to check if a value
+ // is a possible identifier prefix, anchored at start and end
+ // of string.
+ anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+ re := match(regexp.QuoteMeta(s))
+
+ if _, complete := re.LiteralPrefix(); !complete {
+ panic("must be a literal")
+ }
+
+ return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+ var s string
+ for _, re := range res {
+ s += re.String()
+ }
+
+ return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`^` + expression(res...).String() + `$`)
+}
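+
+// A minimal sketch of how these combinators compose within this
+// package (they are unexported); the result is an ordinary
+// *regexp.Regexp that can be matched directly:
+//
+//	re := anchored(capture(match(`[a-z]+`)),
+//		optional(literal(`:`), capture(match(`[0-9]+`))))
+//	re.FindStringSubmatch("abc:42") // ["abc:42", "abc", "42"]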
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
new file mode 100644
index 000000000..1da1d533f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry.go
@@ -0,0 +1,97 @@
+package distribution
+
+import (
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+)
+
+// Scope defines the set of items that match a namespace.
+type Scope interface {
+ // Contains returns true if the name belongs to the namespace.
+ Contains(name string) bool
+}
+
+type fullScope struct{}
+
+func (f fullScope) Contains(string) bool {
+ return true
+}
+
+// GlobalScope represents the full namespace scope which contains
+// all other scopes.
+var GlobalScope = Scope(fullScope{})
+
+// Namespace represents a collection of repositories, addressable by name.
+// Generally, a namespace is backed by a set of one or more services,
+// providing facilities such as registry access, trust, and indexing.
+type Namespace interface {
+ // Scope describes the names that can be used with this Namespace. The
+ // global namespace will have a scope that matches all names. The scope
+ // effectively provides an identity for the namespace.
+ Scope() Scope
+
+ // Repository should return a reference to the named repository. The
+ // registry may or may not have the repository but should always return a
+ // reference.
+ Repository(ctx context.Context, name reference.Named) (Repository, error)
+
+ // Repositories fills 'repos' with a lexicographically sorted catalog of repositories
+ // up to the size of 'repos' and returns the value 'n' for the number of entries
+ // which were filled. 'last' contains an offset in the catalog, and 'err' will be
+ // set to io.EOF if there are no more entries to obtain.
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+
+ // Blobs returns a blob enumerator to access all blobs
+ Blobs() BlobEnumerator
+
+ // BlobStatter returns a BlobStatter to control
+	// BlobStatter returns a BlobStatter used to stat (check the
+	// presence and size of) blobs.
+}
+
+// RepositoryEnumerator describes an operation to enumerate repositories
+type RepositoryEnumerator interface {
+ Enumerate(ctx context.Context, ingester func(string) error) error
+}
+
+// ManifestServiceOption is a function argument for Manifest Service methods
+type ManifestServiceOption interface {
+ Apply(ManifestService) error
+}
+
+// WithTag allows a tag to be passed into Put
+func WithTag(tag string) ManifestServiceOption {
+ return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithTagOption) Apply(m ManifestService) error {
+ // no implementation
+ return nil
+}
+
+// Repository is a named collection of manifests and layers.
+type Repository interface {
+ // Named returns the name of the repository.
+ Named() reference.Named
+
+	// Manifests returns a reference to this repository's manifest service,
+	// with the supplied options applied.
+ Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
+
+ // Blobs returns a reference to this repository's blob service.
+ Blobs(ctx context.Context) BlobStore
+
+ // TODO(stevvooe): The above BlobStore return can probably be relaxed to
+ // be a BlobService for use with clients. This will allow such
+ // implementations to avoid implementing ServeBlob.
+
+	// Tags returns a reference to this repository's tag service
+ Tags(ctx context.Context) TagService
+}
+
+// TODO(stevvooe): Must add close methods to all these. May want to change the
+// way instances are created to better reflect internal dependency
+// relationships.
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
new file mode 100644
index 000000000..6d9bb4b62
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error, allowing
+// users of either to call ErrorCode to obtain the underlying code.
+type ErrorCoder interface {
+ ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+ return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+ // NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+ return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+ d, ok := errorCodeToDescriptors[ec]
+
+ if !ok {
+ return ErrorCodeUnknown.Descriptor()
+ }
+
+ return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+ return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+ return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+ return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+ desc, ok := idToDescriptors[string(text)]
+
+ if !ok {
+ desc = ErrorCodeUnknown.Descriptor()
+ }
+
+ *ec = desc.Code
+
+ return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+ return Error{
+ Code: ec,
+ Message: message,
+ }
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// set the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+ Code ErrorCode `json:"code"`
+ Message string `json:"message"`
+ Detail interface{} `json:"detail,omitempty"`
+
+ // TODO(duglin): See if we need an "args" property so we can do the
+ // variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+ return e.Code
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: e.Message,
+ Detail: detail,
+ }
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: fmt.Sprintf(e.Code.Message(), args...),
+ Detail: e.Detail,
+ }
+}
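+
+// A minimal sketch of deriving Error values from a code (ec stands in
+// for any registered ErrorCode; the detail value is arbitrary):
+//
+//	e1 := ec.WithMessage("something more specific")       // replace Message
+//	e2 := ec.WithDetail(map[string]string{"name": "foo"}) // attach Detail
+//	e3 := ec.WithArgs("foo") // substitute into a Message containing %s
+//	_, _, _ = e1, e2, e3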
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+ // Code is the error code that this descriptor describes.
+ Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing API errors.
+ Value string
+
+	// Message is a short, human-readable description of the error condition
+ // included in API responses.
+ Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+ // for use in documentation.
+ Description string
+
+ // HTTPStatusCode provides the http status code that is associated with
+ // this error condition.
+ HTTPStatusCode int
+}
+
+// ParseErrorCode returns the ErrorCode registered for the given string value.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+ ed, ok := idToDescriptors[value]
+ if ok {
+ return ed.Code
+ }
+
+ return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+ switch len(errs) {
+ case 0:
+ return "<nil>"
+ case 1:
+ return errs[0].Error()
+ default:
+ msg := "errors:\n"
+ for _, err := range errs {
+ msg += err.Error() + "\n"
+ }
+ return msg
+ }
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+ return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error values into
+// a slice of Error structs and then serializes the result
+func (errs Errors) MarshalJSON() ([]byte, error) {
+ var tmpErrs struct {
+ Errors []Error `json:"errors,omitempty"`
+ }
+
+ for _, daErr := range errs {
+ var err Error
+
+		switch daErr := daErr.(type) {
+		case ErrorCode:
+			err = daErr.WithDetail(nil)
+		case Error:
+			err = daErr
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+		}
+
+		// If the Error struct was set up but the Message field was left
+		// empty (""), fall back to the message from the ErrorCode
+ msg := err.Message
+ if msg == "" {
+ msg = err.Code.Message()
+ }
+
+ tmpErrs.Errors = append(tmpErrs.Errors, Error{
+ Code: err.Code,
+ Message: msg,
+ Detail: err.Detail,
+ })
+ }
+
+ return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts each entry into
+// an Error or a bare ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+ var tmpErrs struct {
+ Errors []Error
+ }
+
+ if err := json.Unmarshal(data, &tmpErrs); err != nil {
+ return err
+ }
+
+ var newErrs Errors
+ for _, daErr := range tmpErrs.Errors {
+ // If Message is empty or exactly matches the Code's message string
+ // then just use the Code, no need for a full Error struct
+ if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+			// Errors without details are converted to a bare ErrorCode
+ newErrs = append(newErrs, daErr.Code)
+ } else {
+			// Errors with details are kept as full Error structs
+ newErrs = append(newErrs, Error{
+ Code: daErr.Code,
+ Message: daErr.Message,
+ Detail: daErr.Detail,
+ })
+ }
+ }
+
+ *errs = newErrs
+ return nil
+}
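+
+// A minimal sketch of the JSON round trip (assuming encoding/json is
+// imported): a bare ErrorCode survives as a bare ErrorCode, while an
+// Error carrying Detail keeps its full struct form.
+//
+//	errs := Errors{ErrorCodeUnknown, ErrorCodeUnknown.WithDetail("boom")}
+//	p, _ := json.Marshal(errs)
+//	var out Errors
+//	_ = json.Unmarshal(p, &out)
+//	// out[0].(ErrorCode) == ErrorCodeUnknown; out[1].(Error).Detail == "boom"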
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 000000000..49a64a86e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,44 @@
+package errcode
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error {
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ var sc int
+
+ switch errs := err.(type) {
+ case Errors:
+ if len(errs) < 1 {
+ break
+ }
+
+ if err, ok := errs[0].(ErrorCoder); ok {
+ sc = err.ErrorCode().Descriptor().HTTPStatusCode
+ }
+ case ErrorCoder:
+ sc = errs.ErrorCode().Descriptor().HTTPStatusCode
+ err = Errors{err} // create an envelope.
+ default:
+ // We just have an unhandled error type, so just place in an envelope
+ // and move along.
+ err = Errors{err}
+ }
+
+ if sc == 0 {
+ sc = http.StatusInternalServerError
+ }
+
+ w.WriteHeader(sc)
+
+ if err := json.NewEncoder(w).Encode(err); err != nil {
+ return err
+ }
+
+ return nil
+}
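+
+// A minimal sketch of calling ServeJSON from an HTTP handler (the
+// handler name and detail string are hypothetical):
+//
+//	func blobHandler(w http.ResponseWriter, r *http.Request) {
+//		// ... lookup failed; report it in the standard error envelope:
+//		_ = ServeJSON(w, ErrorCodeUnknown.WithDetail("lookup failed"))
+//	}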
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go
new file mode 100644
index 000000000..d1e8826c6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go
@@ -0,0 +1,138 @@
+package errcode
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "sync"
+)
+
+var (
+ errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
+ idToDescriptors = map[string]ErrorDescriptor{}
+ groupToDescriptors = map[string][]ErrorDescriptor{}
+)
+
+var (
+ // ErrorCodeUnknown is a generic error that can be used as a last
+ // resort if there is no situation-specific error message that can be used
+ ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
+ Value: "UNKNOWN",
+ Message: "unknown error",
+ Description: `Generic error returned when the error does not have an
+ API classification.`,
+ HTTPStatusCode: http.StatusInternalServerError,
+ })
+
+ // ErrorCodeUnsupported is returned when an operation is not supported.
+ ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
+ Value: "UNSUPPORTED",
+ Message: "The operation is unsupported.",
+ Description: `The operation was unsupported due to a missing
+ implementation or invalid set of parameters.`,
+ HTTPStatusCode: http.StatusMethodNotAllowed,
+ })
+
+ // ErrorCodeUnauthorized is returned if a request requires
+ // authentication.
+ ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
+ Value: "UNAUTHORIZED",
+ Message: "authentication required",
+ Description: `The access controller was unable to authenticate
+ the client. Often this will be accompanied by a
+ Www-Authenticate HTTP response header indicating how to
+ authenticate.`,
+ HTTPStatusCode: http.StatusUnauthorized,
+ })
+
+ // ErrorCodeDenied is returned if a client does not have sufficient
+ // permission to perform an action.
+ ErrorCodeDenied = Register("errcode", ErrorDescriptor{
+ Value: "DENIED",
+ Message: "requested access to the resource is denied",
+ Description: `The access controller denied access for the
+ operation on a resource.`,
+ HTTPStatusCode: http.StatusForbidden,
+ })
+
+ // ErrorCodeUnavailable provides a common error to report unavailability
+ // of a service or endpoint.
+ ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
+ Value: "UNAVAILABLE",
+ Message: "service unavailable",
+ Description: "Returned when a service is not available",
+ HTTPStatusCode: http.StatusServiceUnavailable,
+ })
+
+ // ErrorCodeTooManyRequests is returned if a client attempts too many
+ // times to contact a service endpoint.
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
+ Value: "TOOMANYREQUESTS",
+ Message: "too many requests",
+ Description: `Returned when a client attempts to contact a
+ service too many times`,
+ HTTPStatusCode: http.StatusTooManyRequests,
+ })
+)
+
+var nextCode = 1000
+var registerLock sync.Mutex
+
+// Register will make the passed-in error known to the environment and
+// return a new ErrorCode
+func Register(group string, descriptor ErrorDescriptor) ErrorCode {
+ registerLock.Lock()
+ defer registerLock.Unlock()
+
+ descriptor.Code = ErrorCode(nextCode)
+
+ if _, ok := idToDescriptors[descriptor.Value]; ok {
+ panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
+ }
+ if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
+ panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
+ }
+
+ groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
+ errorCodeToDescriptors[descriptor.Code] = descriptor
+ idToDescriptors[descriptor.Value] = descriptor
+
+ nextCode++
+ return descriptor.Code
+}
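+
+// A minimal sketch of registering an application-specific code (the
+// group name and descriptor values here are hypothetical):
+//
+//	var ErrorCodePaused = Register("myapp", ErrorDescriptor{
+//		Value:          "PAUSED",
+//		Message:        "the repository is paused",
+//		Description:    `Returned when an operation targets a paused repository.`,
+//		HTTPStatusCode: http.StatusServiceUnavailable,
+//	})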
+
+type byValue []ErrorDescriptor
+
+func (a byValue) Len() int { return len(a) }
+func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
+
+// GetGroupNames returns the list of Error group names that are registered
+func GetGroupNames() []string {
+ keys := []string{}
+
+ for k := range groupToDescriptors {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// GetErrorCodeGroup returns the named group of error descriptors
+func GetErrorCodeGroup(name string) []ErrorDescriptor {
+ desc := groupToDescriptors[name]
+ sort.Sort(byValue(desc))
+ return desc
+}
+
+// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
+// registered, irrespective of what group they're in
+func GetErrorAllDescriptors() []ErrorDescriptor {
+ result := []ErrorDescriptor{}
+
+ for _, group := range GetGroupNames() {
+ result = append(result, GetErrorCodeGroup(group)...)
+ }
+ sort.Sort(byValue(result))
+ return result
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
new file mode 100644
index 000000000..a9616c58a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
@@ -0,0 +1,1596 @@
+package v2
+
+import (
+ "net/http"
+ "regexp"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ nameParameterDescriptor = ParameterDescriptor{
+ Name: "name",
+ Type: "string",
+ Format: reference.NameRegexp.String(),
+ Required: true,
+ Description: `Name of the target repository.`,
+ }
+
+ referenceParameterDescriptor = ParameterDescriptor{
+ Name: "reference",
+ Type: "string",
+ Format: reference.TagRegexp.String(),
+ Required: true,
+ Description: `Tag or digest of the target manifest.`,
+ }
+
+ uuidParameterDescriptor = ParameterDescriptor{
+ Name: "uuid",
+ Type: "opaque",
+ Required: true,
+ Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.",
+ }
+
+ digestPathParameter = ParameterDescriptor{
+ Name: "digest",
+ Type: "path",
+ Required: true,
+ Format: digest.DigestRegexp.String(),
+ Description: `Digest of desired blob.`,
+ }
+
+ hostHeader = ParameterDescriptor{
+ Name: "Host",
+ Type: "string",
+ Description: "Standard HTTP Host Header. Should be set to the registry host.",
+ Format: "<registry host>",
+ Examples: []string{"registry-1.docker.io"},
+ }
+
+ authHeader = ParameterDescriptor{
+ Name: "Authorization",
+ Type: "string",
+ Description: "An RFC7235 compliant authorization header.",
+ Format: "<scheme> <token>",
+ Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="},
+ }
+
+ authChallengeHeader = ParameterDescriptor{
+ Name: "WWW-Authenticate",
+ Type: "string",
+ Description: "An RFC7235 compliant authentication challenge header.",
+		Format:      `<scheme> realm="<realm>", ...`,
+ Examples: []string{
+ `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`,
+ },
+ }
+
+ contentLengthZeroHeader = ParameterDescriptor{
+ Name: "Content-Length",
+ Description: "The `Content-Length` header must be zero and the body must be empty.",
+ Type: "integer",
+ Format: "0",
+ }
+
+ dockerUploadUUIDHeader = ParameterDescriptor{
+ Name: "Docker-Upload-UUID",
+ Description: "Identifies the docker upload uuid for the current request.",
+ Type: "uuid",
+ Format: "<uuid>",
+ }
+
+ digestHeader = ParameterDescriptor{
+ Name: "Docker-Content-Digest",
+ Description: "Digest of the targeted content for the request.",
+ Type: "digest",
+ Format: "<digest>",
+ }
+
+ linkHeader = ParameterDescriptor{
+ Name: "Link",
+ Type: "link",
+ Description: "RFC5988 compliant rel='next' with URL to next result set, if available",
+ Format: `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`,
+ }
+
+ paginationParameters = []ParameterDescriptor{
+ {
+ Name: "n",
+ Type: "integer",
+			Description: "Limit the number of entries in each response. If not present, all entries will be returned.",
+ Format: "<integer>",
+ Required: false,
+ },
+ {
+ Name: "last",
+ Type: "string",
+ Description: "Result set will include values lexically after last.",
+			Format:   "<string>",
+ Required: false,
+ },
+ }
+
+ unauthorizedResponseDescriptor = ResponseDescriptor{
+ Name: "Authentication Required",
+ StatusCode: http.StatusUnauthorized,
+ Description: "The client is not authenticated.",
+ Headers: []ParameterDescriptor{
+ authChallengeHeader,
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnauthorized,
+ },
+ }
+
+ repositoryNotFoundResponseDescriptor = ResponseDescriptor{
+ Name: "No Such Repository Error",
+ StatusCode: http.StatusNotFound,
+ Description: "The repository is not known to the registry.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ },
+ }
+
+ deniedResponseDescriptor = ResponseDescriptor{
+ Name: "Access Denied",
+ StatusCode: http.StatusForbidden,
+ Description: "The client does not have required access to the repository.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeDenied,
+ },
+ }
+
+ tooManyRequestsDescriptor = ResponseDescriptor{
+ Name: "Too Many Requests",
+ StatusCode: http.StatusTooManyRequests,
+ Description: "The client made too many requests within a time interval.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeTooManyRequests,
+ },
+ }
+)
+
+const (
+ manifestBody = `{
+ "name": <name>,
+ "tag": <tag>,
+ "fsLayers": [
+ {
+ "blobSum": "<digest>"
+ },
+ ...
+   ],
+ "history": <v1 images>,
+ "signature": <JWS>
+}`
+
+ errorsBody = `{
+	"errors": [
+ {
+ "code": <error code>,
+ "message": "<error message>",
+ "detail": ...
+ },
+ ...
+ ]
+}`
+)
+
+// APIDescriptor exports descriptions of the layout of the v2 registry API.
+var APIDescriptor = struct {
+ // RouteDescriptors provides a list of the routes available in the API.
+ RouteDescriptors []RouteDescriptor
+}{
+ RouteDescriptors: routeDescriptors,
+}
+
+// RouteDescriptor describes a route specified by name.
+type RouteDescriptor struct {
+ // Name is the name of the route, as specified in RouteNameXXX exports.
+	// These names should be considered a unique reference for a route. If
+ // the route is registered with gorilla, this is the name that will be
+ // used.
+ Name string
+
+ // Path is a gorilla/mux-compatible regexp that can be used to match the
+ // route. For any incoming method and path, only one route descriptor
+ // should match.
+ Path string
+
+	// Entity should be a short, human-readable description of the object
+ // targeted by the endpoint.
+ Entity string
+
+ // Description should provide an accurate overview of the functionality
+ // provided by the route.
+ Description string
+
+ // Methods should describe the various HTTP methods that may be used on
+ // this route, including request and response formats.
+ Methods []MethodDescriptor
+}
+
+// MethodDescriptor provides a description of the requests that may be
+// conducted with the target method.
+type MethodDescriptor struct {
+
+ // Method is an HTTP method, such as GET, PUT or POST.
+ Method string
+
+ // Description should provide an overview of the functionality provided by
+ // the covered method, suitable for use in documentation. Use of markdown
+ // here is encouraged.
+ Description string
+
+ // Requests is a slice of request descriptors enumerating how this
+ // endpoint may be used.
+ Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be carried out with the parent method. It's most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+ // Name provides a short identifier for the request, usable as a title or
+ // to provide quick context for the particular request.
+ Name string
+
+	// Description should cover the request's purpose, covering any details for
+ // this particular use case.
+ Description string
+
+ // Headers describes headers that must be used with the HTTP request.
+ Headers []ParameterDescriptor
+
+ // PathParameters enumerate the parameterized path components for the
+ // given request, as defined in the route's regular expression.
+ PathParameters []ParameterDescriptor
+
+ // QueryParameters provides a list of query parameters for the given
+ // request.
+ QueryParameters []ParameterDescriptor
+
+ // Body describes the format of the request body.
+ Body BodyDescriptor
+
+ // Successes enumerates the possible responses that are considered to be
+ // the result of a successful request.
+ Successes []ResponseDescriptor
+
+ // Failures covers the possible failures from this particular request.
+ Failures []ResponseDescriptor
+}
+
+// ResponseDescriptor describes the components of an API response.
+type ResponseDescriptor struct {
+ // Name provides a short identifier for the response, usable as a title or
+ // to provide quick context for the particular response.
+ Name string
+
+ // Description should provide a brief overview of the role of the
+ // response.
+ Description string
+
+ // StatusCode specifies the status received by this particular response.
+ StatusCode int
+
+ // Headers covers any headers that may be returned from the response.
+ Headers []ParameterDescriptor
+
+ // Fields describes any fields that may be present in the response.
+ Fields []ParameterDescriptor
+
+ // ErrorCodes enumerates the error codes that may be returned along with
+ // the response.
+ ErrorCodes []errcode.ErrorCode
+
+ // Body describes the body of the response, if any.
+ Body BodyDescriptor
+}
+
+// BodyDescriptor describes a request body and its expected content type. For
+// the most part, it should be example json or some placeholder for body
+// data in documentation.
+type BodyDescriptor struct {
+ ContentType string
+ Format string
+}
+
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+ // Name is the name of the parameter, either of the path component or
+ // query parameter.
+ Name string
+
+ // Type specifies the type of the parameter, such as string, integer, etc.
+ Type string
+
+ // Description provides a human-readable description of the parameter.
+ Description string
+
+ // Required means the field is required when set.
+ Required bool
+
+	// Format specifies the string format accepted by this parameter.
+ Format string
+
+ // Regexp is a compiled regular expression that can be used to validate
+ // the contents of the parameter.
+ Regexp *regexp.Regexp
+
+ // Examples provides multiple examples for the values that might be valid
+ // for this parameter.
+ Examples []string
+}
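+
+// A minimal sketch of consuming these descriptors, e.g. to emit
+// documentation (assuming fmt is imported):
+//
+//	for _, route := range APIDescriptor.RouteDescriptors {
+//		fmt.Printf("%s %s\n", route.Name, route.Path)
+//		for _, m := range route.Methods {
+//			fmt.Printf("  %s - %s\n", m.Method, m.Description)
+//		}
+//	}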
+
+var routeDescriptors = []RouteDescriptor{
+ {
+ Name: RouteNameBase,
+ Path: "/v2/",
+ Entity: "Base",
+ Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`,
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Check that the endpoint implements Docker Registry API V2.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The API implements V2 protocol and is accessible.",
+ StatusCode: http.StatusOK,
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "The registry does not implement the V2 API.",
+ StatusCode: http.StatusNotFound,
+ },
+ unauthorizedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: RouteNameTags,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list",
+ Entity: "Tags",
+ Description: "Retrieve information about tags.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Fetch the tags under the repository identified by `name`.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Tags",
+ Description: "Return all tags for the repository",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusOK,
+ Description: "A list of tags for the named repository.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "name": <name>,
+ "tags": [
+ <tag>,
+ ...
+ ]
+}`,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Tags Paginated",
+ Description: "Return a portion of the tags for the specified repository.",
+ PathParameters: []ParameterDescriptor{nameParameterDescriptor},
+ QueryParameters: paginationParameters,
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusOK,
+ Description: "A list of tags for the named repository.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ linkHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "name": <name>,
+ "tags": [
+ <tag>,
+ ...
+ ],
+}`,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: RouteNameManifest,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}",
+ Entity: "Manifest",
+ Description: "Create, update, delete and retrieve manifests.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ referenceParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.",
+ StatusCode: http.StatusOK,
+ Headers: []ParameterDescriptor{
+ digestHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "<media type of manifest>",
+ Format: manifestBody,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "The name or reference was invalid.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeTagInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "PUT",
+ Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ referenceParameterDescriptor,
+ },
+ Body: BodyDescriptor{
+ ContentType: "<media type of manifest>",
+ Format: manifestBody,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.",
+ StatusCode: http.StatusCreated,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Description: "The canonical location url of the uploaded manifest.",
+ Format: "<url>",
+ },
+ contentLengthZeroHeader,
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Manifest",
+ Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.",
+ StatusCode: http.StatusBadRequest,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeTagInvalid,
+ ErrorCodeManifestInvalid,
+ ErrorCodeManifestUnverified,
+ ErrorCodeBlobUnknown,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ {
+ Name: "Missing Layer(s)",
+ Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+	"errors": [{
+ "code": "BLOB_UNKNOWN",
+ "message": "blob unknown to registry",
+ "detail": {
+ "digest": "<digest>"
+ }
+ },
+ ...
+ ]
+}`,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Method: "DELETE",
+ Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ referenceParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusAccepted,
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Reference",
+							Description: "The specified `name` or `reference` was invalid and the delete was unable to proceed.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeTagInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ {
+ Name: "Unknown Manifest",
+							Description: "The specified `name` or `reference` is unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeManifestUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ {
+ Name: RouteNameBlob,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
+ Entity: "Blob",
+ Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Fetch Blob",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ digestPathParameter,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.",
+ StatusCode: http.StatusOK,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "The length of the requested blob content.",
+ Format: "<length>",
+ },
+ digestHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<blob binary data>",
+ },
+ },
+ {
+ Description: "The blob identified by `digest` is available at the provided location.",
+ StatusCode: http.StatusTemporaryRedirect,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Description: "The location where the layer should be accessible.",
+ Format: "<blob location>",
+ },
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeDigestInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+ StatusCode: http.StatusNotFound,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeBlobUnknown,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Fetch Blob Part",
+					Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Ranges: bytes` is returned, range requests can be used to fetch partial content.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Range",
+ Type: "string",
+ Description: "HTTP Range header specifying blob chunk.",
+ Format: "bytes=<start>-<end>",
+ },
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ digestPathParameter,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.",
+ StatusCode: http.StatusPartialContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "The length of the requested blob chunk.",
+ Format: "<length>",
+ },
+ {
+ Name: "Content-Range",
+ Type: "byte range",
+ Description: "Content range of blob chunk.",
+ Format: "bytes <start>-<end>/<size>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<blob binary data>",
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeDigestInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeBlobUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.",
+ StatusCode: http.StatusRequestedRangeNotSatisfiable,
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "DELETE",
+ Description: "Delete the blob identified by `name` and `digest`",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ digestPathParameter,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusAccepted,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "0",
+ Format: "0",
+ },
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ {
+ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+ StatusCode: http.StatusNotFound,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeBlobUnknown,
+ },
+ },
+ {
+ Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled",
+ StatusCode: http.StatusMethodNotAllowed,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+
+ // TODO(stevvooe): We may want to add a PUT request here to
+ // kickoff an upload of a blob, integrated with the blob upload
+ // API.
+ },
+ },
+
+ {
+ Name: RouteNameBlobUpload,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/",
+ Entity: "Initiate Blob Upload",
+ Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "POST",
+ Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Initiate Monolithic Blob Upload",
+					Description: "Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of blob>",
+ },
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ QueryParameters: []ParameterDescriptor{
+ {
+ Name: "digest",
+ Type: "query",
+ Format: "<digest>",
+ Regexp: digest.DigestRegexp,
+ Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
+ },
+ },
+ Body: BodyDescriptor{
+						ContentType: "application/octet-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob has been created in the registry and is available at the provided location.",
+ StatusCode: http.StatusCreated,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Initiate Resumable Blob Upload",
+ Description: "Initiate a resumable blob upload with an empty request body.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
+ StatusCode: http.StatusAccepted,
+ Headers: []ParameterDescriptor{
+ contentLengthZeroHeader,
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Format: "0-0",
+ Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.",
+ },
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Mount Blob",
+ Description: "Mount a blob identified by the `mount` parameter from another repository.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ QueryParameters: []ParameterDescriptor{
+ {
+ Name: "mount",
+ Type: "query",
+ Format: "<digest>",
+ Regexp: digest.DigestRegexp,
+ Description: `Digest of blob to mount from the source repository.`,
+ },
+ {
+ Name: "from",
+ Type: "query",
+ Format: "<repository name>",
+ Regexp: reference.NameRegexp,
+ Description: `Name of the source repository.`,
+ },
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob has been mounted in the repository and is available at the provided location.",
+ StatusCode: http.StatusCreated,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+
+ {
+ Name: RouteNameBlobUploadChunk,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}",
+ Entity: "Blob Upload",
+		Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only use the URL provided in the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Retrieve the progress of the current upload, as reported by the `Range` header.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Progress",
+ Description: "The upload is known and in progress. The last received offset is available in the `Range` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Range",
+ Type: "header",
+ Format: "0-<offset>",
+ Description: "Range indicating the current progress of the upload.",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "PATCH",
+ Description: "Upload a chunk of data for the specified upload.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Stream upload",
+					Description: "Upload a stream of data to the specified upload without completing the upload.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Data Accepted",
+ Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Type: "header",
+ Format: "0-<offset>",
+ Description: "Range indicating the current progress of the upload.",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Chunked upload",
+					Description: "Upload a chunk of data to the specified upload without completing the upload. The data will be uploaded to the specified Content Range.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Range",
+ Type: "header",
+ Format: "<start of range>-<end of range, inclusive>",
+ Required: true,
+							Description: "Range of bytes identifying the desired block of content represented by the body. Start must be the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+ },
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of chunk>",
+							Description: "Length of the chunk being uploaded, corresponding to the length of the request body.",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary chunk>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Chunk Accepted",
+ Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Type: "header",
+ Format: "0-<offset>",
+ Description: "Range indicating the current progress of the upload.",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+ StatusCode: http.StatusRequestedRangeNotSatisfiable,
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "PUT",
+ Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of data>",
+ Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
+ },
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ QueryParameters: []ParameterDescriptor{
+ {
+ Name: "digest",
+ Type: "string",
+ Format: "<digest>",
+ Regexp: digest.DigestRegexp,
+ Required: true,
+ Description: `Digest of uploaded blob.`,
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Complete",
+ Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ Description: "The canonical location of the blob for retrieval",
+ },
+ {
+ Name: "Content-Range",
+ Type: "header",
+ Format: "<start of range>-<end of range, inclusive>",
+								Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
+ },
+ contentLengthZeroHeader,
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ errcode.ErrorCodeUnsupported,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "DELETE",
+			Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Cancel the upload specified by `uuid`.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Deleted",
+ Description: "The upload has been successfully deleted.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ contentLengthZeroHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "An error was encountered processing the delete. The client may ignore this error.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: RouteNameCatalog,
+ Path: "/v2/_catalog",
+ Entity: "Catalog",
+		Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only confirm that a repository is available; the absence of a repository from the list does not imply that it does not exist.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+			Description: "Retrieve a sorted, JSON-encoded list of repositories available in the registry.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Catalog Fetch",
+ Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.",
+ Successes: []ResponseDescriptor{
+ {
+						Description: "Returns the unabridged list of repositories as a JSON response.",
+ StatusCode: http.StatusOK,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "repositories": [
+ <name>,
+ ...
+ ]
+}`,
+ },
+ },
+ },
+ },
+ {
+ Name: "Catalog Fetch Paginated",
+ Description: "Return the specified portion of repositories.",
+ QueryParameters: paginationParameters,
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusOK,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "repositories": [
+ <name>,
+ ...
+	],
+	"next": "<url>?last=<name>&n=<last value of n>"
+}`,
+ },
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ linkHeader,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+}
+
+var routeDescriptorsMap map[string]RouteDescriptor
+
+func init() {
+ routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
+
+ for _, descriptor := range routeDescriptors {
+ routeDescriptorsMap[descriptor.Name] = descriptor
+ }
+}
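
Since the route table above is plain data, it can be consumed programmatically, for example to generate endpoint documentation. A minimal same-package sketch (routeDescriptors is unexported, so this must live in package v2; printRouteSummary is a hypothetical helper, not part of the vendored code):

package v2

import "fmt"

// printRouteSummary walks the descriptor table and prints each route's
// name and path, plus the HTTP methods it supports.
func printRouteSummary() {
	for _, desc := range routeDescriptors {
		fmt.Printf("%s  %s\n", desc.Name, desc.Path)
		for _, method := range desc.Methods {
			fmt.Printf("  %s\n", method.Method)
		}
	}
}
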
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
new file mode 100644
index 000000000..cde011959
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
@@ -0,0 +1,9 @@
+// Package v2 describes routes, urls and the error codes used in the Docker
+// Registry JSON HTTP API V2. In addition to declarations, descriptors are
+// provided for routes and error codes that can be used for implementation and
+// automatically generating documentation.
+//
+// Definitions here are considered to be locked down for the V2 registry api.
+// Any changes must be considered carefully and should not proceed without a
+// change proposal in docker core.
+package v2
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 000000000..97d6923aa
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+ // ErrorCodeDigestInvalid is returned when uploading a blob if the
+ // provided digest does not match the blob contents.
+ ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "DIGEST_INVALID",
+ Message: "provided digest did not match uploaded content",
+ Description: `When a blob is uploaded, the registry will check that
+ the content matches the digest provided by the client. The error may
+ include a detail structure with the key "digest", including the
+ invalid digest string. This error may also be returned when a manifest
+ includes an invalid layer digest.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the
+	// provided size does not match the content length.
+ ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "SIZE_INVALID",
+ Message: "provided length did not match content length",
+ Description: `When a layer is uploaded, the provided size will be
+ checked against the uploaded content. If they do not match, this error
+ will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+	// ErrorCodeNameInvalid is returned when an invalid repository name is
+	// encountered during manifest validation or any API operation.
+ ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_INVALID",
+ Message: "invalid repository name",
+ Description: `Invalid repository name encountered either during
+ manifest validation or any API operation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeTagInvalid is returned when the tag in the manifest does not
+ // match the provided tag.
+ ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "TAG_INVALID",
+ Message: "manifest tag did not match URI",
+ Description: `During a manifest upload, if the tag in the manifest
+ does not match the uri tag, this error will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+	// ErrorCodeNameUnknown is returned when the repository name is not known.
+ ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_UNKNOWN",
+ Message: "repository name not known to registry",
+ Description: `This is returned if the name used during an operation is
+ unknown to the registry.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+	// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
+ ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_UNKNOWN",
+ Message: "manifest unknown",
+ Description: `This error is returned when the manifest, identified by
+	name and tag, is unknown to the repository.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+	// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+ // typically during a PUT operation. This error encompasses all errors
+ // encountered during manifest validation that aren't signature errors.
+ ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_INVALID",
+ Message: "manifest invalid",
+ Description: `During upload, manifests undergo several checks ensuring
+ validity. If those checks fail, this error may be returned, unless a
+ more specific error is included. The detail will contain information
+	about the failed validation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeManifestUnverified is returned when the manifest fails
+ // signature verification.
+ ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_UNVERIFIED",
+ Message: "manifest failed signature verification",
+ Description: `During manifest upload, if the manifest fails signature
+ verification, this error will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeManifestBlobUnknown is returned when a manifest blob is
+ // unknown to the registry.
+ ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_BLOB_UNKNOWN",
+ Message: "blob unknown to registry",
+ Description: `This error may be returned when a manifest blob is
+ unknown to the registry.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeBlobUnknown is returned when a blob is unknown to the
+ // registry. This can happen when the manifest references a nonexistent
+ // layer or the result is not found by a blob fetch.
+ ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UNKNOWN",
+ Message: "blob unknown to registry",
+ Description: `This error may be returned when a blob is unknown to the
+ registry in a specified repository. This can be returned with a
+ standard get or if a manifest references an unknown layer during
+ upload.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+ ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UPLOAD_UNKNOWN",
+ Message: "blob upload unknown to registry",
+ Description: `If a blob upload has been cancelled or was never
+ started, this error code may be returned.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+ ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UPLOAD_INVALID",
+ Message: "blob upload invalid",
+ Description: `The blob upload encountered an error and can no
+ longer proceed.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+)
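
In a handler, these registered codes become concrete errors by attaching request-specific detail, as done elsewhere in this tree with WithDetail. A minimal same-package sketch (validateDigest is a hypothetical helper):

package v2

// validateDigest turns a registered error code into a concrete error value
// carrying detail; the errcode machinery maps it back to the HTTP 400
// registered for DIGEST_INVALID above.
func validateDigest(provided, computed string) error {
	if provided != computed {
		// WithDetail attaches a detail structure, here the offending digest.
		return ErrorCodeDigestInvalid.WithDetail(map[string]string{"digest": provided})
	}
	return nil
}
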
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
new file mode 100644
index 000000000..9bc41a3a6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
@@ -0,0 +1,161 @@
+package v2
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+var (
+ // according to rfc7230
+ reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+ reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
+ reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a lenient parser of the Forwarded header defined in RFC 7239. The header contains
+// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
+// function parses only the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and the unparsed remainder of the input string.
+//
+// Examples of Forwarded header values:
+//
+// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+ // Following are states of forwarded header parser. Any state could transition to a failure.
+ const (
+ // terminating state; can transition to Parameter
+ stateElement = iota
+ // terminating state; can transition to KeyValueDelimiter
+ stateParameter
+ // can transition to Value
+ stateKeyValueDelimiter
+ // can transition to one of { QuotedValue, PairEnd }
+ stateValue
+ // can transition to one of { EscapedCharacter, PairEnd }
+ stateQuotedValue
+ // can transition to one of { QuotedValue }
+ stateEscapedCharacter
+ // terminating state; can transition to one of { Parameter, Element }
+ statePairEnd
+ )
+
+ var (
+ parameter string
+ value string
+ parse = forwarded[:]
+ res = map[string]string{}
+ state = stateElement
+ )
+
+Loop:
+ for {
+ // skip spaces unless in quoted value
+ if state != stateQuotedValue && state != stateEscapedCharacter {
+ parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
+ }
+
+ if len(parse) == 0 {
+ if state != stateElement && state != statePairEnd && state != stateParameter {
+ return nil, parse, fmt.Errorf("unexpected end of input")
+ }
+ // terminating
+ break
+ }
+
+ switch state {
+ // terminate at list element delimiter
+ case stateElement:
+ if parse[0] == ',' {
+ parse = parse[1:]
+ break Loop
+ }
+ state = stateParameter
+
+ // parse parameter (the key of key-value pair)
+ case stateParameter:
+ match := reToken.FindString(parse)
+ if len(match) == 0 {
+ return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
+ }
+ parameter = strings.ToLower(match)
+ parse = parse[len(match):]
+ state = stateKeyValueDelimiter
+
+ // parse '='
+ case stateKeyValueDelimiter:
+ if parse[0] != '=' {
+ return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
+ }
+ parse = parse[1:]
+ state = stateValue
+
+ // parse value or quoted value
+ case stateValue:
+ if parse[0] == '"' {
+ parse = parse[1:]
+ state = stateQuotedValue
+ } else {
+ value = reToken.FindString(parse)
+ if len(value) == 0 {
+ return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
+ }
+ if _, exists := res[parameter]; exists {
+ return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
+ }
+ res[parameter] = value
+ parse = parse[len(value):]
+ value = ""
+ state = statePairEnd
+ }
+
+ // parse a part of quoted value until the first backslash
+ case stateQuotedValue:
+ match := reQuotedValue.FindString(parse)
+ value += match
+ parse = parse[len(match):]
+ switch {
+ case len(parse) == 0:
+ return nil, parse, fmt.Errorf("unterminated quoted string")
+ case parse[0] == '"':
+ res[parameter] = value
+ value = ""
+ parse = parse[1:]
+ state = statePairEnd
+ case parse[0] == '\\':
+ parse = parse[1:]
+ state = stateEscapedCharacter
+ }
+
+ // parse escaped character in a quoted string, ignore the backslash
+ // transition back to QuotedValue state
+ case stateEscapedCharacter:
+ c := reEscapedCharacter.FindString(parse)
+ if len(c) == 0 {
+ return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
+ }
+ value += c
+ parse = parse[1:]
+ state = stateQuotedValue
+
+ // expect either a new key-value pair, new list or end of input
+ case statePairEnd:
+ switch parse[0] {
+ case ';':
+ parse = parse[1:]
+ state = stateParameter
+ case ',':
+ state = stateElement
+ default:
+ return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
+ }
+ }
+ }
+
+ return res, parse, nil
+}
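
A same-package sketch exercising the parser on the second example from the doc comment above (parseForwardedHeader is unexported; exampleForwarded is a hypothetical helper):

package v2

import "fmt"

// exampleForwarded parses the first list element of a Forwarded header and
// shows that the remaining elements are returned unparsed.
func exampleForwarded() {
	header := `for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"`
	params, rest, err := parseForwardedHeader(header)
	if err != nil {
		panic(err)
	}
	fmt.Println(params["for"])  // 192.0.2.43:443
	fmt.Println(params["host"]) // registry.example.org
	fmt.Println(rest)           // the second list element, left unparsed
}
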
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 000000000..5b80d5be7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,49 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the names under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+ RouteNameBase = "base"
+ RouteNameManifest = "manifest"
+ RouteNameTags = "tags"
+ RouteNameBlob = "blob"
+ RouteNameBlobUpload = "blob-upload"
+ RouteNameBlobUploadChunk = "blob-upload-chunk"
+ RouteNameCatalog = "catalog"
+)
+
+var allEndpoints = []string{
+ RouteNameManifest,
+ RouteNameCatalog,
+ RouteNameTags,
+ RouteNameBlob,
+ RouteNameBlobUpload,
+ RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+ return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+ rootRouter := mux.NewRouter()
+ router := rootRouter
+ if prefix != "" {
+ router = router.PathPrefix(prefix).Subrouter()
+ }
+
+ router.StrictSlash(true)
+
+ for _, descriptor := range routeDescriptors {
+ router.Path(descriptor.Path).Name(descriptor.Name)
+ }
+
+ return rootRouter
+}
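
Because every route is registered under a name, the router can also classify incoming requests. A hedged sketch (routeNameForRequest is a hypothetical helper; mux.RouteMatch is the standard gorilla/mux matching type):

package v2

import (
	"net/http"

	"github.com/gorilla/mux"
)

// routeNameForRequest matches a request against the named V2 routes and
// reports which endpoint it addresses, along with the extracted path
// parameters (e.g. "name", "digest").
func routeNameForRequest(r *http.Request) (string, map[string]string, bool) {
	var match mux.RouteMatch
	if !Router().Match(r, &match) {
		return "", nil, false
	}
	return match.Route.GetName(), match.Vars, true
}
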
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 000000000..1337bdb12
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,266 @@
+package v2
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/docker/distribution/reference"
+ "github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will be fall
+// under "/foo/v2/...". Most application will only provide a schema, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+ root *url.URL // url root (ie http://localhost/)
+ router *mux.Router
+ relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+ return &URLBuilder{
+ root: root,
+ router: Router(),
+ relative: relative,
+ }
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+ u, err := url.Parse(root)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+ var (
+ scheme = "http"
+ host = r.Host
+ )
+
+ if r.TLS != nil {
+ scheme = "https"
+ } else if len(r.URL.Scheme) > 0 {
+ scheme = r.URL.Scheme
+ }
+
+	// Handle forwarded headers
+	// Prefer "Forwarded" header as defined by RFC 7239 if given
+ // see https://tools.ietf.org/html/rfc7239
+ if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+ forwardedHeader, _, err := parseForwardedHeader(forwarded)
+ if err == nil {
+ if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+ scheme = fproto
+ }
+ if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+ host = fhost
+ }
+ }
+ } else {
+ if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+ scheme = forwardedProto
+ }
+ if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+ // According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+ // comma-separated list of hosts, to which each proxy appends the
+ // requested host. We want to grab the first from this comma-separated
+ // list.
+ hosts := strings.SplitN(forwardedHost, ",", 2)
+ host = strings.TrimSpace(hosts[0])
+ }
+ }
+
+ basePath := routeDescriptorsMap[RouteNameBase].Path
+
+ requestPath := r.URL.Path
+ index := strings.Index(requestPath, basePath)
+
+ u := &url.URL{
+ Scheme: scheme,
+ Host: host,
+ }
+
+ if index > 0 {
+ // N.B. index+1 is important because we want to include the trailing /
+ u.Path = requestPath[0 : index+1]
+ }
+
+ return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+ route := ub.cloneRoute(RouteNameBase)
+
+ baseURL, err := route.URL()
+ if err != nil {
+ return "", err
+ }
+
+ return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameCatalog)
+
+ catalogURL, err := route.URL()
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+ route := ub.cloneRoute(RouteNameTags)
+
+ tagsURL, err := route.URL("name", name.Name())
+ if err != nil {
+ return "", err
+ }
+
+ return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+ route := ub.cloneRoute(RouteNameManifest)
+
+ tagOrDigest := ""
+ switch v := ref.(type) {
+ case reference.Tagged:
+ tagOrDigest = v.Tag()
+ case reference.Digested:
+ tagOrDigest = v.Digest().String()
+ default:
+ return "", fmt.Errorf("reference must have a tag or digest")
+ }
+
+ manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+ if err != nil {
+ return "", err
+ }
+
+ return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+ route := ub.cloneRoute(RouteNameBlob)
+
+ layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+ if err != nil {
+ return "", err
+ }
+
+ return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameBlobUpload)
+
+ uploadURL, err := route.URL("name", name.Name())
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+ uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+ route := new(mux.Route)
+ root := new(url.URL)
+
+ *route = *ub.router.GetRoute(name) // clone the route
+ *root = *ub.root
+
+ return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+ *mux.Route
+ root *url.URL
+ relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+ routeURL, err := cr.Route.URL(pairs...)
+ if err != nil {
+ return nil, err
+ }
+
+ if cr.relative {
+ return routeURL, nil
+ }
+
+ if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+ routeURL.Path = routeURL.Path[1:]
+ }
+
+ url := cr.root.ResolveReference(routeURL)
+ url.Scheme = cr.root.Scheme
+ return url, nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+ merged := u.Query()
+
+ for _, v := range values {
+ for k, vv := range v {
+ merged[k] = append(merged[k], vv...)
+ }
+ }
+
+ u.RawQuery = merged.Encode()
+ return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
+func appendValues(u string, values ...url.Values) string {
+ up, err := url.Parse(u)
+
+ if err != nil {
+ panic(err) // should never happen
+ }
+
+ return appendValuesURL(up, values...).String()
+}
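
A small, hedged usage sketch of the builder from outside the package; the registry host and repository name are illustrative, and reference.WithName is assumed from the vendored reference package:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.org/", false)
	if err != nil {
		panic(err)
	}
	named, err := reference.WithName("library/ubuntu")
	if err != nil {
		panic(err)
	}
	tagsURL, err := ub.BuildTagsURL(named)
	if err != nil {
		panic(err)
	}
	fmt.Println(tagsURL) // e.g. https://registry.example.org/v2/library/ubuntu/tags/list
}
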
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
new file mode 100644
index 000000000..2c3ebe165
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+ "net/url"
+ "strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+ addr := url.Host
+ if !hasPort(addr) {
+ return addr + ":" + portMap[url.Scheme]
+ }
+ return addr
+}
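
A same-package sketch of the address canonicalization (canonicalAddr is unexported; exampleCanonicalAddr is a hypothetical helper):

package challenge

import (
	"fmt"
	"net/url"
)

// exampleCanonicalAddr shows that a missing port is filled in from the
// scheme's default, while an explicit port is kept.
func exampleCanonicalAddr() {
	u, _ := url.Parse("https://registry.example.org")
	fmt.Println(canonicalAddr(u)) // registry.example.org:443

	u, _ = url.Parse("http://registry.example.org:5000")
	fmt.Println(canonicalAddr(u)) // registry.example.org:5000
}
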
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 000000000..c9bdfc355
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+ // Scheme is the auth-scheme according to RFC 2617
+ Scheme string
+
+ // Parameters are the auth-params according to RFC 2617
+ Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized response will be
+// viewed as not requiring challenges.
+type Manager interface {
+ // GetChallenges returns the challenges for the given
+ // endpoint URL.
+ GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+ // AddResponse adds the response to the challenge
+ // manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+ // response was authorized, any challenges for the
+ // endpoint will be cleared.
+ AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+ return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+ }
+}
+
+type simpleManager struct {
+ sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+ endpoint.Host = strings.ToLower(endpoint.Host)
+ endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+ normalizeURL(&endpoint)
+
+ m.RLock()
+ defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+ return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+ challenges := ResponseChallenges(resp)
+ if resp.Request == nil {
+ return fmt.Errorf("missing request reference")
+ }
+ urlCopy := url.URL{
+ Path: resp.Request.URL.Path,
+ Host: resp.Request.URL.Host,
+ Scheme: resp.Request.URL.Scheme,
+ }
+ normalizeURL(&urlCopy)
+
+ m.Lock()
+ defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+ return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+ if resp.StatusCode == http.StatusUnauthorized {
+ // Parse the WWW-Authenticate Header and store the challenges
+ // on this endpoint object.
+ return parseAuthHeader(resp.Header)
+ }
+
+ return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+ challenges := []Challenge{}
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ if v != "" {
+ challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+ }
+ }
+ return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s := expectToken(header)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ s = "," + skipSpace(s)
+ for strings.HasPrefix(s, ",") {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s[1:]))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return
+ }
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
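
A usage sketch for the simple manager: probe an endpoint, record the challenges from its response, and read them back. The registry URL is illustrative:

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
	// An unauthenticated GET on /v2/ typically returns 401 with a
	// WWW-Authenticate header describing the token service.
	resp, err := http.Get("https://registry.example.org/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	manager := challenge.NewSimpleManager()
	if err := manager.AddResponse(resp); err != nil {
		panic(err)
	}

	challenges, _ := manager.GetChallenges(*resp.Request.URL)
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters["realm"])
	}
}
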
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
new file mode 100644
index 000000000..e3ffcb00f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
@@ -0,0 +1,162 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+)
+
+type httpBlobUpload struct {
+ statter distribution.BlobStatter
+ client *http.Client
+
+ uuid string
+ startedAt time.Time
+
+ location string // always the last value of the location header.
+ offset int64
+ closed bool
+}
+
+func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
+ panic("Not implemented")
+}
+
+func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode == http.StatusNotFound {
+ return distribution.ErrBlobUploadUnknown
+ }
+ return HandleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
+ req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
+ if err != nil {
+ return 0, err
+ }
+ defer req.Body.Close()
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+
+ if !SuccessStatus(resp.StatusCode) {
+ return 0, hbu.handleErrorResponse(resp)
+ }
+
+ hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+ hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+ if err != nil {
+ return 0, err
+ }
+ rng := resp.Header.Get("Range")
+ var start, end int64
+ if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+ return 0, err
+ } else if n != 2 || end < start {
+ return 0, fmt.Errorf("bad range format: %s", rng)
+ }
+
+ return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
+ req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
+ if err != nil {
+ return 0, err
+ }
+ req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
+ req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
+ req.Header.Set("Content-Type", "application/octet-stream")
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+
+ if !SuccessStatus(resp.StatusCode) {
+ return 0, hbu.handleErrorResponse(resp)
+ }
+
+ hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+ hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+ if err != nil {
+ return 0, err
+ }
+ rng := resp.Header.Get("Range")
+ var start, end int
+ if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+ return 0, err
+ } else if n != 2 || end < start {
+ return 0, fmt.Errorf("bad range format: %s", rng)
+ }
+
+ return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Size() int64 {
+ return hbu.offset
+}
+
+func (hbu *httpBlobUpload) ID() string {
+ return hbu.uuid
+}
+
+func (hbu *httpBlobUpload) StartedAt() time.Time {
+ return hbu.startedAt
+}
+
+func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+ // TODO(dmcgowan): Check if already finished, if so just fetch
+ req, err := http.NewRequest("PUT", hbu.location, nil)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ values := req.URL.Query()
+ values.Set("digest", desc.Digest.String())
+ req.URL.RawQuery = values.Encode()
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if !SuccessStatus(resp.StatusCode) {
+ return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
+ }
+
+ return hbu.statter.Stat(ctx, desc.Digest)
+}
+
+func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
+ req, err := http.NewRequest("DELETE", hbu.location, nil)
+ if err != nil {
+ return err
+ }
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return hbu.handleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) Close() error {
+ hbu.closed = true
+ return nil
+}
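
For context on how this writer is meant to be driven: httpBlobUpload satisfies distribution.BlobWriter, so a caller streams data with ReadFrom or Write and then finalizes with Commit. A hedged sketch, assuming the go-digest API for on-the-fly digest computation (uploadBlob is a hypothetical helper):

package client

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/opencontainers/go-digest"
)

// uploadBlob streams content into a BlobWriter while computing its digest,
// then commits the upload under that digest.
func uploadBlob(ctx context.Context, bw distribution.BlobWriter, content io.Reader) (distribution.Descriptor, error) {
	digester := digest.Canonical.Digester()
	// ReadFrom PATCHes the data to the upload location; the tee feeds the
	// same bytes to the digester.
	if _, err := bw.ReadFrom(io.TeeReader(content, digester.Hash())); err != nil {
		return distribution.Descriptor{}, err
	}
	// Commit issues the final PUT with ?digest=<digest> to finalize.
	return bw.Commit(ctx, distribution.Descriptor{Digest: digester.Digest()})
}
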
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
new file mode 100644
index 000000000..52d49d5d2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/errors.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type UnexpectedHTTPStatusError struct {
+ Status string
+}
+
+func (e *UnexpectedHTTPStatusError) Error() string {
+ return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type UnexpectedHTTPResponseError struct {
+ ParseErr error
+ StatusCode int
+ Response []byte
+}
+
+func (e *UnexpectedHTTPResponseError) Error() string {
+ return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+ var errors errcode.Errors
+ body, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ // For backward compatibility, handle irregularly formatted
+ // messages that contain a "details" field.
+ var detailsErr struct {
+ Details string `json:"details"`
+ }
+ err = json.Unmarshal(body, &detailsErr)
+ if err == nil && detailsErr.Details != "" {
+ switch statusCode {
+ case http.StatusUnauthorized:
+ return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+ case http.StatusTooManyRequests:
+ return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+ default:
+ return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+ }
+ }
+
+ if err := json.Unmarshal(body, &errors); err != nil {
+ return &UnexpectedHTTPResponseError{
+ ParseErr: err,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ if len(errors) == 0 {
+ // If there was no error specified in the body, return
+ // UnexpectedHTTPResponseError.
+ return &UnexpectedHTTPResponseError{
+ ParseErr: ErrNoErrorsInBody,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ return errors
+}
+
+func makeErrorList(err error) []error {
+ if errL, ok := err.(errcode.Errors); ok {
+ return []error(errL)
+ }
+ return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+ return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns the error parsed from an HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of the
+// expected range.
+func HandleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+ // Check for OAuth errors within the `WWW-Authenticate` header first
+ // See https://tools.ietf.org/html/rfc6750#section-3
+ for _, c := range challenge.ResponseChallenges(resp) {
+ if c.Scheme == "bearer" {
+ var err errcode.Error
+ // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+ switch c.Parameters["error"] {
+ case "invalid_token":
+ err.Code = errcode.ErrorCodeUnauthorized
+ case "insufficient_scope":
+ err.Code = errcode.ErrorCodeDenied
+ default:
+ continue
+ }
+ if description := c.Parameters["error_description"]; description != "" {
+ err.Message = description
+ } else {
+ err.Message = err.Code.Message()
+ }
+
+ return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+ }
+ }
+ err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+ if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+ return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+ }
+ return err
+ }
+ return &UnexpectedHTTPStatusError{Status: resp.Status}
+}
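+
+// The following is an illustrative sketch, not part of the original
+// source: a caller typically checks SuccessStatus and delegates all
+// other responses to HandleErrorResponse. httpClient and req are
+// placeholders.
+//
+//  resp, err := httpClient.Do(req)
+//  if err != nil {
+//      return err
+//  }
+//  defer resp.Body.Close()
+//  if !SuccessStatus(resp.StatusCode) {
+//      return HandleErrorResponse(resp)
+//  }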
+
+// SuccessStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func SuccessStatus(status int) bool {
+ return status >= 200 && status <= 399
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
new file mode 100644
index 000000000..b82a968e2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -0,0 +1,853 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/client/transport"
+ "github.com/docker/distribution/registry/storage/cache"
+ "github.com/docker/distribution/registry/storage/cache/memory"
+ "github.com/opencontainers/go-digest"
+)
+
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// checkHTTPRedirect is a callback that can manipulate redirected HTTP
+// requests. It is used to preserve Accept and Range headers.
+func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+
+ if len(via) > 0 {
+ for headerName, headerVals := range via[0].Header {
+ if headerName != "Accept" && headerName != "Range" {
+ continue
+ }
+ for _, val := range headerVals {
+ // Don't add to redirected request if redirected
+ // request already has a header with the same
+ // name and value.
+ hasValue := false
+ for _, existingVal := range req.Header[headerName] {
+ if existingVal == val {
+ hasValue = true
+ break
+ }
+ }
+ if !hasValue {
+ req.Header.Add(headerName, val)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+ ub, err := v2.NewURLBuilderFromString(baseURL, false)
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ Timeout: 1 * time.Minute,
+ CheckRedirect: checkHTTPRedirect,
+ }
+
+ return &registry{
+ client: client,
+ ub: ub,
+ context: ctx,
+ }, nil
+}
+
+type registry struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
+// are no more entries.
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+ var numFilled int
+ var returnErr error
+
+ values := buildCatalogValues(len(entries), last)
+ u, err := r.ub.BuildCatalogURL(values)
+ if err != nil {
+ return 0, err
+ }
+
+ resp, err := r.client.Get(u)
+ if err != nil {
+ return 0, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ var ctlg struct {
+ Repositories []string `json:"repositories"`
+ }
+ decoder := json.NewDecoder(resp.Body)
+
+ if err := decoder.Decode(&ctlg); err != nil {
+ return 0, err
+ }
+
+ for cnt := range ctlg.Repositories {
+ entries[cnt] = ctlg.Repositories[cnt]
+ }
+ numFilled = len(ctlg.Repositories)
+
+ link := resp.Header.Get("Link")
+ if link == "" {
+ returnErr = io.EOF
+ }
+ } else {
+ return 0, HandleErrorResponse(resp)
+ }
+
+ return numFilled, returnErr
+}
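+
+// Illustrative sketch (not part of the original source): walking the
+// complete catalog by calling Repositories until io.EOF, using the last
+// entry of each page as the offset for the next. reg and the page size
+// are assumptions for the example.
+//
+//  entries := make([]string, 50)
+//  last := ""
+//  for {
+//      n, err := reg.Repositories(ctx, entries, last)
+//      if err != nil && err != io.EOF {
+//          return err
+//      }
+//      handlePage(entries[:n]) // hypothetical consumer
+//      if err == io.EOF {
+//          break
+//      }
+//      last = entries[n-1]
+//  }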
+
+// NewRepository creates a new Repository for the given repository name and base URL.
+func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
+ ub, err := v2.NewURLBuilderFromString(baseURL, false)
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ CheckRedirect: checkHTTPRedirect,
+ // TODO(dmcgowan): create cookie jar
+ }
+
+ return &repository{
+ client: client,
+ ub: ub,
+ name: name,
+ context: ctx,
+ }, nil
+}
+
+type repository struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+ name reference.Named
+}
+
+func (r *repository) Named() reference.Named {
+ return r.name
+}
+
+func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
+ statter := &blobStatter{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ }
+ return &blobs{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
+ }
+}
+
+func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+ // todo(richardscothern): options should be sent over the wire
+ return &manifests{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ etags: make(map[string]string),
+ }, nil
+}
+
+func (r *repository) Tags(ctx context.Context) distribution.TagService {
+ return &tags{
+ client: r.client,
+ ub: r.ub,
+ context: r.context,
+ name: r.Named(),
+ }
+}
+
+// tags implements remote tagging operations.
+type tags struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+ name reference.Named
+}
+
+// All returns all tags
+func (t *tags) All(ctx context.Context) ([]string, error) {
+ var tags []string
+
+ u, err := t.ub.BuildTagsURL(t.name)
+ if err != nil {
+ return tags, err
+ }
+
+ for {
+ resp, err := t.client.Get(u)
+ if err != nil {
+ return tags, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return tags, err
+ }
+
+ tagsResponse := struct {
+ Tags []string `json:"tags"`
+ }{}
+ if err := json.Unmarshal(b, &tagsResponse); err != nil {
+ return tags, err
+ }
+ tags = append(tags, tagsResponse.Tags...)
+ if link := resp.Header.Get("Link"); link != "" {
+ u = strings.Trim(strings.Split(link, ";")[0], "<>")
+ } else {
+ return tags, nil
+ }
+ } else {
+ return tags, HandleErrorResponse(resp)
+ }
+ }
+}
+
+func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
+ desc := distribution.Descriptor{}
+ headers := response.Header
+
+ ctHeader := headers.Get("Content-Type")
+ if ctHeader == "" {
+ return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
+ }
+ desc.MediaType = ctHeader
+
+ digestHeader := headers.Get("Docker-Content-Digest")
+ if digestHeader == "" {
+ bytes, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ return desc, nil
+ }
+
+ dgst, err := digest.Parse(digestHeader)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ desc.Digest = dgst
+
+ lengthHeader := headers.Get("Content-Length")
+ if lengthHeader == "" {
+ return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
+ }
+ length, err := strconv.ParseInt(lengthHeader, 10, 64)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ desc.Size = length
+
+ return desc, nil
+}
+
+// Get issues a HEAD request for a Manifest against its named endpoint in order
+// to construct a descriptor for the tag. If the registry doesn't support HEADing
+// a manifest, it falls back to GET.
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
+ ref, err := reference.WithTag(t.name, tag)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ u, err := t.ub.BuildManifestURL(ref)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ newRequest := func(method string) (*http.Response, error) {
+ req, err := http.NewRequest(method, u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, t := range distribution.ManifestMediaTypes() {
+ req.Header.Add("Accept", t)
+ }
+ resp, err := t.client.Do(req)
+ return resp, err
+ }
+
+ resp, err := newRequest("HEAD")
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ switch {
+ case resp.StatusCode >= 200 && resp.StatusCode < 400:
+ return descriptorFromResponse(resp)
+ default:
+ // if the response is an error - there will be no body to decode.
+ // Issue a GET request:
+ // - for data from a server that does not handle HEAD
+ // - to get error details in case of a failure
+ resp, err = newRequest("GET")
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+ return descriptorFromResponse(resp)
+ }
+ return distribution.Descriptor{}, HandleErrorResponse(resp)
+ }
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+ panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+ panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+ panic("not implemented")
+}
+
+type manifests struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+ etags map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+ ref, err := reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return false, err
+ }
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return false, err
+ }
+
+ resp, err := ms.client.Head(u)
+ if err != nil {
+ return false, err
+ }
+
+ if SuccessStatus(resp.StatusCode) {
+ return true, nil
+ } else if resp.StatusCode == http.StatusNotFound {
+ return false, nil
+ }
+ return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and an ErrManifestNotModified error will be returned. The etag is
+// automatically quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+ return etagOption{tag, etag}
+}
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+ if ms, ok := ms.(*manifests); ok {
+ ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+ return nil
+ }
+ return fmt.Errorf("etag options is a client-only option")
+}
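+
+// Illustrative sketch (not part of the original source): fetching a
+// manifest conditionally with a previously observed etag. savedEtag is
+// a placeholder, and distribution.WithTag is assumed to construct the
+// tag option checked for in Get below.
+//
+//  m, err := ms.Get(ctx, "", distribution.WithTag("latest"),
+//      AddEtagToTag("latest", savedEtag))
+//  if err == distribution.ErrManifestNotModified {
+//      // the cached copy of the manifest is still current
+//  }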
+
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request, taken from the 'Docker-Content-Digest' header.
+// The returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
+ return contentDigestOption{dgst}
+}
+
+type contentDigestOption struct{ digest *digest.Digest }
+
+func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
+ return nil
+}
+
+func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
+ var (
+ digestOrTag string
+ ref reference.Named
+ err error
+ contentDgst *digest.Digest
+ )
+
+ for _, option := range options {
+ if opt, ok := option.(distribution.WithTagOption); ok {
+ digestOrTag = opt.Tag
+ ref, err = reference.WithTag(ms.name, opt.Tag)
+ if err != nil {
+ return nil, err
+ }
+ } else if opt, ok := option.(contentDigestOption); ok {
+ contentDgst = opt.digest
+ } else {
+ err := option.Apply(ms)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if digestOrTag == "" {
+ digestOrTag = dgst.String()
+ ref, err = reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, t := range distribution.ManifestMediaTypes() {
+ req.Header.Add("Accept", t)
+ }
+
+ if _, ok := ms.etags[digestOrTag]; ok {
+ req.Header.Set("If-None-Match", ms.etags[digestOrTag])
+ }
+
+ resp, err := ms.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotModified {
+ return nil, distribution.ErrManifestNotModified
+ } else if SuccessStatus(resp.StatusCode) {
+ if contentDgst != nil {
+ dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
+ if err == nil {
+ *contentDgst = dgst
+ }
+ }
+ mt := resp.Header.Get("Content-Type")
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ m, _, err := distribution.UnmarshalManifest(mt, body)
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+ }
+ return nil, HandleErrorResponse(resp)
+}
+
+// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
+// tag name in order to build the correct upload URL.
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+ ref := ms.name
+ var tagged bool
+
+ for _, option := range options {
+ if opt, ok := option.(distribution.WithTagOption); ok {
+ var err error
+ ref, err = reference.WithTag(ref, opt.Tag)
+ if err != nil {
+ return "", err
+ }
+ tagged = true
+ } else {
+ err := option.Apply(ms)
+ if err != nil {
+ return "", err
+ }
+ }
+ }
+ mediaType, p, err := m.Payload()
+ if err != nil {
+ return "", err
+ }
+
+ if !tagged {
+ // generate a canonical digest and Put by digest
+ _, d, err := distribution.UnmarshalManifest(mediaType, p)
+ if err != nil {
+ return "", err
+ }
+ ref, err = reference.WithDigest(ref, d.Digest)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ manifestURL, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return "", err
+ }
+
+ putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
+ if err != nil {
+ return "", err
+ }
+
+ putRequest.Header.Set("Content-Type", mediaType)
+
+ resp, err := ms.client.Do(putRequest)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ dgstHeader := resp.Header.Get("Docker-Content-Digest")
+ dgst, err := digest.Parse(dgstHeader)
+ if err != nil {
+ return "", err
+ }
+
+ return dgst, nil
+ }
+
+ return "", HandleErrorResponse(resp)
+}
+
+func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
+ ref, err := reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return err
+ }
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := ms.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return HandleErrorResponse(resp)
+}
+
+// todo(richardscothern): Restore interface and implementation with merge of #1050
+/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
+ panic("not supported")
+}*/
+
+type blobs struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+
+ statter distribution.BlobDescriptorService
+ distribution.BlobDeleter
+}
+
+func sanitizeLocation(location, base string) (string, error) {
+ baseURL, err := url.Parse(base)
+ if err != nil {
+ return "", err
+ }
+
+ locationURL, err := url.Parse(location)
+ if err != nil {
+ return "", err
+ }
+
+ return baseURL.ResolveReference(locationURL).String(), nil
+}
+
+func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ return bs.statter.Stat(ctx, dgst)
+}
+
+func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+ reader, err := bs.Open(ctx, dgst)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+
+ return ioutil.ReadAll(reader)
+}
+
+func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return nil, err
+ }
+ blobURL, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return transport.NewHTTPReadSeeker(bs.client, blobURL,
+ func(resp *http.Response) error {
+ if resp.StatusCode == http.StatusNotFound {
+ return distribution.ErrBlobUnknown
+ }
+ return HandleErrorResponse(resp)
+ }), nil
+}
+
+func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+ panic("not implemented")
+}
+
+func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+ writer, err := bs.Create(ctx)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ dgstr := digest.Canonical.Digester()
+ n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ if n < int64(len(p)) {
+ return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
+ }
+
+ desc := distribution.Descriptor{
+ MediaType: mediaType,
+ Size: int64(len(p)),
+ Digest: dgstr.Digest(),
+ }
+
+ return writer.Commit(ctx, desc)
+}
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+ return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+ return optionFunc(func(v interface{}) error {
+ opts, ok := v.(*distribution.CreateOptions)
+ if !ok {
+ return fmt.Errorf("unexpected options type: %T", v)
+ }
+
+ opts.Mount.ShouldMount = true
+ opts.Mount.From = ref
+
+ return nil
+ })
+}
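+
+// Illustrative sketch (not part of the original source): attempting a
+// cross-repository mount and detecting that the blob was mounted rather
+// than uploaded. repo and canonicalRef are placeholders.
+//
+//  w, err := repo.Blobs(ctx).Create(ctx, WithMountFrom(canonicalRef))
+//  if ebm, ok := err.(distribution.ErrBlobMounted); ok {
+//      // already present; ebm.Descriptor describes the mounted blob
+//  } else if err == nil {
+//      // proceed with the returned BlobWriter w
+//  }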
+
+func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+ var opts distribution.CreateOptions
+
+ for _, option := range options {
+ err := option.Apply(&opts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var values []url.Values
+
+ if opts.Mount.ShouldMount {
+ values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
+ }
+
+ u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := bs.client.Post(u, "", nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ switch resp.StatusCode {
+ case http.StatusCreated:
+ desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
+ case http.StatusAccepted:
+ // TODO(dmcgowan): Check for invalid UUID
+ uuid := resp.Header.Get("Docker-Upload-UUID")
+ location, err := sanitizeLocation(resp.Header.Get("Location"), u)
+ if err != nil {
+ return nil, err
+ }
+
+ return &httpBlobUpload{
+ statter: bs.statter,
+ client: bs.client,
+ uuid: uuid,
+ startedAt: time.Now(),
+ location: location,
+ }, nil
+ default:
+ return nil, HandleErrorResponse(resp)
+ }
+}
+
+func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+ panic("not implemented")
+}
+
+func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
+ return bs.statter.Clear(ctx, dgst)
+}
+
+type blobStatter struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+}
+
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ u, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ resp, err := bs.client.Head(u)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ lengthHeader := resp.Header.Get("Content-Length")
+ if lengthHeader == "" {
+ return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
+ }
+
+ length, err := strconv.ParseInt(lengthHeader, 10, 64)
+ if err != nil {
+ return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
+ }
+
+ return distribution.Descriptor{
+ MediaType: resp.Header.Get("Content-Type"),
+ Size: length,
+ Digest: dgst,
+ }, nil
+ } else if resp.StatusCode == http.StatusNotFound {
+ return distribution.Descriptor{}, distribution.ErrBlobUnknown
+ }
+ return distribution.Descriptor{}, HandleErrorResponse(resp)
+}
+
+func buildCatalogValues(maxEntries int, last string) url.Values {
+ values := url.Values{}
+
+ if maxEntries > 0 {
+ values.Add("n", strconv.Itoa(maxEntries))
+ }
+
+ if last != "" {
+ values.Add("last", last)
+ }
+
+ return values
+}
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return err
+ }
+ blobURL, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("DELETE", blobURL, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := bs.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return HandleErrorResponse(resp)
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
new file mode 100644
index 000000000..e5ff09d75
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
@@ -0,0 +1,251 @@
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+var (
+ contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+ // ErrWrongCodeForByteRange is returned if the client sends a request
+ // with a Range header but the server returns a 2xx or 3xx code other
+ // than 206 Partial Content.
+ ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+ io.ReadSeeker
+ io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset,
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+ return &httpReadSeeker{
+ client: client,
+ url: url,
+ errorHandler: errorHandler,
+ }
+}
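+
+// Illustrative sketch (not part of the original source): reading from a
+// non-zero offset, which causes the seeker to issue a Range request on
+// the next Read. httpClient and blobURL are placeholders.
+//
+//  rsc := NewHTTPReadSeeker(httpClient, blobURL, nil)
+//  defer rsc.Close()
+//  if _, err := rsc.Seek(1024, os.SEEK_SET); err != nil {
+//      return err
+//  }
+//  data, err := ioutil.ReadAll(rsc)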
+
+type httpReadSeeker struct {
+ client *http.Client
+ url string
+
+ // errorHandler creates an error from an unsuccessful HTTP response.
+ // This allows the error to be created with the HTTP response body
+ // without leaking the body through a returned error.
+ errorHandler func(*http.Response) error
+
+ size int64
+
+ // rc is the remote read closer.
+ rc io.ReadCloser
+ // readerOffset tracks the offset as of the last read.
+ readerOffset int64
+ // seekOffset allows Seek to override the offset. Seek changes
+ // seekOffset instead of changing readerOffset directly so that
+ // connection resets can be delayed and possibly avoided if the
+ // seek is undone (i.e. seeking to the end and then back to the
+ // beginning).
+ seekOffset int64
+ err error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ // If we sought to a different position, we need to reset the
+ // connection. This logic is here instead of Seek so that if
+ // a seek is undone before the next read, the connection doesn't
+ // need to be closed and reopened. A common example of this is
+ // seeking to the end to determine the length, and then seeking
+ // back to the original position.
+ if hrs.readerOffset != hrs.seekOffset {
+ hrs.reset()
+ }
+
+ hrs.readerOffset = hrs.seekOffset
+
+ rd, err := hrs.reader()
+ if err != nil {
+ return 0, err
+ }
+
+ n, err = rd.Read(p)
+ hrs.seekOffset += int64(n)
+ hrs.readerOffset += int64(n)
+
+ return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ lastReaderOffset := hrs.readerOffset
+
+ if whence == os.SEEK_SET && hrs.rc == nil {
+ // If no request has been made yet, and we are seeking to an
+ // absolute position, set the read offset as well to avoid an
+ // unnecessary request.
+ hrs.readerOffset = offset
+ }
+
+ _, err := hrs.reader()
+ if err != nil {
+ hrs.readerOffset = lastReaderOffset
+ return 0, err
+ }
+
+ newOffset := hrs.seekOffset
+
+ switch whence {
+ case os.SEEK_CUR:
+ newOffset += offset
+ case os.SEEK_END:
+ if hrs.size < 0 {
+ return 0, errors.New("content length not known")
+ }
+ newOffset = hrs.size + offset
+ case os.SEEK_SET:
+ newOffset = offset
+ }
+
+ if newOffset < 0 {
+ err = errors.New("cannot seek to negative position")
+ } else {
+ hrs.seekOffset = newOffset
+ }
+
+ return hrs.seekOffset, err
+}
+
+func (hrs *httpReadSeeker) Close() error {
+ if hrs.err != nil {
+ return hrs.err
+ }
+
+ // close and release reader chain
+ if hrs.rc != nil {
+ hrs.rc.Close()
+ }
+
+ hrs.rc = nil
+
+ hrs.err = errors.New("httpLayer: closed")
+
+ return nil
+}
+
+func (hrs *httpReadSeeker) reset() {
+ if hrs.err != nil {
+ return
+ }
+ if hrs.rc != nil {
+ hrs.rc.Close()
+ hrs.rc = nil
+ }
+}
+
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+ if hrs.err != nil {
+ return nil, hrs.err
+ }
+
+ if hrs.rc != nil {
+ return hrs.rc, nil
+ }
+
+ req, err := http.NewRequest("GET", hrs.url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if hrs.readerOffset > 0 {
+ // If we are at a different offset, issue a range request from there.
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
+ // TODO: get context in here
+ // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
+ }
+
+ req.Header.Add("Accept-Encoding", "identity")
+ resp, err := hrs.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // Normally would use client.SuccessStatus, but that would be a cyclic
+ // import
+ if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
+ if hrs.readerOffset > 0 {
+ if resp.StatusCode != http.StatusPartialContent {
+ return nil, ErrWrongCodeForByteRange
+ }
+
+ contentRange := resp.Header.Get("Content-Range")
+ if contentRange == "" {
+ return nil, errors.New("no Content-Range header found in HTTP 206 response")
+ }
+
+ submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
+ if len(submatches) < 4 {
+ return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
+ }
+
+ startByte, err := strconv.ParseUint(submatches[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
+ }
+
+ if startByte != uint64(hrs.readerOffset) {
+ return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
+ }
+
+ endByte, err := strconv.ParseUint(submatches[2], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
+ }
+
+ if submatches[3] == "*" {
+ hrs.size = -1
+ } else {
+ size, err := strconv.ParseUint(submatches[3], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
+ }
+
+ if endByte+1 != size {
+ return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
+ }
+
+ hrs.size = int64(size)
+ }
+ } else if resp.StatusCode == http.StatusOK {
+ hrs.size = resp.ContentLength
+ } else {
+ hrs.size = -1
+ }
+ hrs.rc = resp.Body
+ } else {
+ defer resp.Body.Close()
+ if hrs.errorHandler != nil {
+ return nil, hrs.errorHandler(resp)
+ }
+ return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
+ }
+
+ return hrs.rc, nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
new file mode 100644
index 000000000..30e45fab0
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
@@ -0,0 +1,147 @@
+package transport
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+// RequestModifier represents an object which will do an in-place
+// modification of an HTTP request.
+type RequestModifier interface {
+ ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+ return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+ for k, s := range http.Header(h) {
+ req.Header[k] = append(req.Header[k], s...)
+ }
+
+ return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+ return &transport{
+ Modifiers: modifiers,
+ Base: base,
+ }
+}
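+
+// Illustrative sketch (not part of the original source): composing a
+// transport that adds a static Authorization header to every request.
+// token is a placeholder.
+//
+//  hdr := http.Header{"Authorization": []string{"Bearer " + token}}
+//  rt := NewTransport(http.DefaultTransport, NewHeaderRequestModifier(hdr))
+//  httpClient := &http.Client{Transport: rt}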
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request.
+type transport struct {
+ Modifiers []RequestModifier
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip applies the registered modifiers to a copy of the request
+// and then delegates to the base RoundTripper, tracking the modified
+// request so that it can later be canceled.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req2 := cloneRequest(req)
+ for _, modifier := range t.Modifiers {
+ if err := modifier.ModifyRequest(req2); err != nil {
+ return nil, err
+ }
+ }
+
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
new file mode 100644
index 000000000..10a390919
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
@@ -0,0 +1,35 @@
+// Package cache provides facilities to speed up access to the storage
+// backend.
+package cache
+
+import (
+ "fmt"
+
+ "github.com/docker/distribution"
+)
+
+// BlobDescriptorCacheProvider provides repository scoped
+// BlobDescriptorService cache instances and a global descriptor cache.
+type BlobDescriptorCacheProvider interface {
+ distribution.BlobDescriptorService
+
+ RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
+}
+
+// ValidateDescriptor provides a helper function to ensure that caches have
+// common criteria for admitting descriptors.
+func ValidateDescriptor(desc distribution.Descriptor) error {
+ if err := desc.Digest.Validate(); err != nil {
+ return err
+ }
+
+ if desc.Size < 0 {
+ return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
+ }
+
+ if desc.MediaType == "" {
+ return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
new file mode 100644
index 000000000..f647616bc
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
@@ -0,0 +1,101 @@
+package cache
+
+import (
+ "github.com/docker/distribution/context"
+ "github.com/opencontainers/go-digest"
+
+ "github.com/docker/distribution"
+)
+
+// Metrics is used to hold metric counters
+// related to the number of times a cache was
+// hit or missed.
+type Metrics struct {
+ Requests uint64
+ Hits uint64
+ Misses uint64
+}
+
+// MetricsTracker represents a metric tracker
+// which simply counts the number of hits and misses.
+type MetricsTracker interface {
+ Hit()
+ Miss()
+ Metrics() Metrics
+}
+
+type cachedBlobStatter struct {
+ cache distribution.BlobDescriptorService
+ backend distribution.BlobDescriptorService
+ tracker MetricsTracker
+}
+
+// NewCachedBlobStatter creates a new statter which prefers a cache and
+// falls back to a backend.
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ }
+}
+
+// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
+// falls back to a backend. Hits and misses will be sent to the tracker.
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ tracker: tracker,
+ }
+}
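+
+// Illustrative sketch (not part of the original source): a minimal
+// MetricsTracker backed by the Metrics struct above. A production
+// tracker would likely guard the counters with atomics or a mutex.
+//
+//  type countingTracker struct{ m Metrics }
+//
+//  func (t *countingTracker) Hit()             { t.m.Requests++; t.m.Hits++ }
+//  func (t *countingTracker) Miss()            { t.m.Requests++; t.m.Misses++ }
+//  func (t *countingTracker) Metrics() Metrics { return t.m }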
+
+func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ desc, err := cbds.cache.Stat(ctx, dgst)
+ if err != nil {
+ if err != distribution.ErrBlobUnknown {
+ context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
+ }
+
+ goto fallback
+ }
+
+ if cbds.tracker != nil {
+ cbds.tracker.Hit()
+ }
+ return desc, nil
+fallback:
+ if cbds.tracker != nil {
+ cbds.tracker.Miss()
+ }
+ desc, err = cbds.backend.Stat(ctx, dgst)
+ if err != nil {
+ return desc, err
+ }
+
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+
+ return desc, err
+}
+
+func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+ err := cbds.cache.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+
+ err = cbds.backend.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
new file mode 100644
index 000000000..b2fcaf4e8
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
@@ -0,0 +1,179 @@
+package memory
+
+import (
+ "sync"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/storage/cache"
+ "github.com/opencontainers/go-digest"
+)
+
+type inMemoryBlobDescriptorCacheProvider struct {
+ global *mapBlobDescriptorCache
+ repositories map[string]*mapBlobDescriptorCache
+ mu sync.RWMutex
+}
+
+// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
+// storing blob descriptor data.
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
+ return &inMemoryBlobDescriptorCacheProvider{
+ global: newMapBlobDescriptorCache(),
+ repositories: make(map[string]*mapBlobDescriptorCache),
+ }
+}
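+
+// Illustrative sketch (not part of the original source): obtaining a
+// repository-scoped view of the in-memory cache. The repository name is
+// an arbitrary example.
+//
+//  provider := NewInMemoryBlobDescriptorCacheProvider()
+//  repoCache, err := provider.RepositoryScoped("library/ubuntu")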
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
+ if _, err := reference.ParseNormalizedNamed(repo); err != nil {
+ return nil, err
+ }
+
+ imbdcp.mu.RLock()
+ defer imbdcp.mu.RUnlock()
+
+ return &repositoryScopedInMemoryBlobDescriptorCache{
+ repo: repo,
+ parent: imbdcp,
+ repository: imbdcp.repositories[repo],
+ }, nil
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ return imbdcp.global.Stat(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
+ return imbdcp.global.Clear(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ _, err := imbdcp.Stat(ctx, dgst)
+ if err == distribution.ErrBlobUnknown {
+
+ if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
+ // if the digests differ, set the other canonical mapping
+ if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
+ return err
+ }
+ }
+
+ // unknown, just set it
+ return imbdcp.global.SetDescriptor(ctx, dgst, desc)
+ }
+
+ // we already know it, do nothing
+ return err
+}
+
+// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
+// repository cache. Instances are not thread-safe but the delegated
+// operations are.
+type repositoryScopedInMemoryBlobDescriptorCache struct {
+ repo string
+ parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
+ repository *mapBlobDescriptorCache
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ rsimbdcp.parent.mu.Lock()
+ repo := rsimbdcp.repository
+ rsimbdcp.parent.mu.Unlock()
+
+ if repo == nil {
+ return distribution.Descriptor{}, distribution.ErrBlobUnknown
+ }
+
+ return repo.Stat(ctx, dgst)
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+ rsimbdcp.parent.mu.Lock()
+ repo := rsimbdcp.repository
+ rsimbdcp.parent.mu.Unlock()
+
+ if repo == nil {
+ return distribution.ErrBlobUnknown
+ }
+
+ return repo.Clear(ctx, dgst)
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ rsimbdcp.parent.mu.Lock()
+ repo := rsimbdcp.repository
+ if repo == nil {
+ // allocate map since we are setting it now.
+ var ok bool
+ // have to read back value since we may have allocated elsewhere.
+ repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
+ if !ok {
+ repo = newMapBlobDescriptorCache()
+ rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
+ }
+ rsimbdcp.repository = repo
+ }
+ rsimbdcp.parent.mu.Unlock()
+
+ if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
+ return err
+ }
+
+ return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
+}
+
+// mapBlobDescriptorCache provides a simple map-based implementation of the
+// descriptor cache.
+type mapBlobDescriptorCache struct {
+ descriptors map[digest.Digest]distribution.Descriptor
+ mu sync.RWMutex
+}
+
+var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
+
+func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
+ return &mapBlobDescriptorCache{
+ descriptors: make(map[digest.Digest]distribution.Descriptor),
+ }
+}
+
+func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ if err := dgst.Validate(); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ mbdc.mu.RLock()
+ defer mbdc.mu.RUnlock()
+
+ desc, ok := mbdc.descriptors[dgst]
+ if !ok {
+ return distribution.Descriptor{}, distribution.ErrBlobUnknown
+ }
+
+ return desc, nil
+}
+
+func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+ mbdc.mu.Lock()
+ defer mbdc.mu.Unlock()
+
+ delete(mbdc.descriptors, dgst)
+ return nil
+}
+
+func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ if err := dgst.Validate(); err != nil {
+ return err
+ }
+
+ if err := cache.ValidateDescriptor(desc); err != nil {
+ return err
+ }
+
+ mbdc.mu.Lock()
+ defer mbdc.mu.Unlock()
+
+ mbdc.descriptors[dgst] = desc
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go
new file mode 100644
index 000000000..503056596
--- /dev/null
+++ b/vendor/github.com/docker/distribution/tags.go
@@ -0,0 +1,27 @@
+package distribution
+
+import (
+ "github.com/docker/distribution/context"
+)
+
+// TagService provides access to information about tagged objects.
+type TagService interface {
+ // Get retrieves the descriptor identified by the tag. Some
+ // implementations may differentiate between "trusted" tags and
+ // "untrusted" tags. If a tag is "untrusted", the mapping will be returned
+ // as an ErrTagUntrusted error, with the target descriptor.
+ Get(ctx context.Context, tag string) (Descriptor, error)
+
+ // Tag associates the tag with the provided descriptor, updating the
+ // current association, if needed.
+ Tag(ctx context.Context, tag string, desc Descriptor) error
+
+ // Untag removes the given tag association
+ Untag(ctx context.Context, tag string) error
+
+ // All returns the set of tags managed by this tag service
+ All(ctx context.Context) ([]string, error)
+
+ // Lookup returns the set of tags referencing the given digest.
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error)
+}
diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go
new file mode 100644
index 000000000..d433ccaf5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/uuid/uuid.go
@@ -0,0 +1,126 @@
+// Package uuid provides simple UUID generation. Only version 4 style UUIDs
+// can be generated.
+//
+// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
+package uuid
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+ "time"
+)
+
+const (
+ // Bits is the number of bits in a UUID
+ Bits = 128
+
+ // Size is the number of bytes in a UUID
+ Size = Bits / 8
+
+ format = "%08x-%04x-%04x-%04x-%012x"
+)
+
+var (
+ // ErrUUIDInvalid indicates a parsed string is not a valid uuid.
+ ErrUUIDInvalid = fmt.Errorf("invalid uuid")
+
+ // Loggerf can be used to override the default logging destination. Such
+ // log messages in this library should be logged at warning or higher.
+ Loggerf = func(format string, args ...interface{}) {}
+)
+
+// UUID represents a UUID value. UUIDs can be compared and set to other values
+// and accessed by byte.
+type UUID [Size]byte
+
+// Generate creates a new, version 4 uuid.
+func Generate() (u UUID) {
+ const (
+ // ensures we back off for no more than 450ms total. Use the following
+ // to select a new value, in units of 10ms:
+ // n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
+ maxretries = 9
+ backoff = time.Millisecond * 10
+ )
+
+ var (
+ totalBackoff time.Duration
+ count int
+ retries int
+ )
+
+ for {
+ // This should never block but the read may fail. Because of this,
+ // we just try to read the random number generator until we get
+ // something. This is a very rare condition but may happen.
+ b := time.Duration(retries) * backoff
+ time.Sleep(b)
+ totalBackoff += b
+
+ n, err := io.ReadFull(rand.Reader, u[count:])
+ if err != nil {
+ if retryOnError(err) && retries < maxretries {
+ count += n
+ retries++
+ Loggerf("error generating version 4 uuid, retrying: %v", err)
+ continue
+ }
+
+ // Any other errors represent a system problem. What did someone
+ // do to /dev/urandom?
+ panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+ }
+
+ break
+ }
+
+ u[6] = (u[6] & 0x0f) | 0x40 // set version byte
+ u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
+
+ return u
+}
+
+// Parse attempts to extract a uuid from the string or returns an error.
+func Parse(s string) (u UUID, err error) {
+ if len(s) != 36 {
+ return UUID{}, ErrUUIDInvalid
+ }
+
+ // create stack addresses for each section of the uuid.
+ p := make([][]byte, 5)
+
+ if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
+ return u, err
+ }
+
+ copy(u[0:4], p[0])
+ copy(u[4:6], p[1])
+ copy(u[6:8], p[2])
+ copy(u[8:10], p[3])
+ copy(u[10:16], p[4])
+
+ return
+}
+
+func (u UUID) String() string {
+ return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
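+
+// Illustrative sketch (not part of the original source): a round trip
+// through Generate, String and Parse yields the original value.
+//
+//  u := Generate()
+//  parsed, err := Parse(u.String())
+//  // err == nil and parsed == u on success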
+
+// retryOnError tries to detect whether or not retrying would be fruitful.
+func retryOnError(err error) bool {
+ switch err := err.(type) {
+ case *os.PathError:
+ return retryOnError(err.Err) // unpack the target error
+ case syscall.Errno:
+ if err == syscall.EPERM {
+ // EPERM represents an entropy pool exhaustion, a condition under
+ // which we backoff and retry.
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf
new file mode 100644
index 000000000..d67edd779
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor.conf
@@ -0,0 +1,43 @@
+github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
+github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
+github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
+github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
+github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
+github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
+github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
+github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
+github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
+github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
+github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
+github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
+github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
+github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
+github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
+github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a
+github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
+github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
+github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
+github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
+github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
+github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
+github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
+github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870
+github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
+github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
+github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
+github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6
+golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b
+golang.org/x/net 4876518f9e71663000c348837735820161a42df7
+golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf
+golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
+google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54
+google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19
+google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
+google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
+gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
+gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
+gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
+rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
+github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE
new file mode 100644
index 000000000..1ea555e2a
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 David Calavera
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/docker-credential-helpers/README.md b/vendor/github.com/docker/docker-credential-helpers/README.md
new file mode 100644
index 000000000..f9cbc3fb5
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/README.md
@@ -0,0 +1,82 @@
+## Introduction
+
+docker-credential-helpers is a suite of programs to use native stores to keep Docker credentials safe.
+
+## Installation
+
+Go to the [Releases](https://github.com/docker/docker-credential-helpers/releases) page and download the binary that works best for you. Put that binary in your `$PATH`, so Docker can find it.
+
+### Building from scratch
+
+The programs in this repository are written in the Go programming language. These instructions assume that you have prior knowledge of the language and have it installed on your machine.
+
+1 - Download the source and put it in your `$GOPATH` with `go get`.
+
+```
+$ go get github.com/docker/docker-credential-helpers
+```
+
+2 - Use `make` to build the program you want. That will leave the executable in the `bin` directory inside the repository.
+
+```
+$ cd $GOPATH/src/github.com/docker/docker-credential-helpers
+$ make osxkeychain
+```
+
+3 - Put that binary in your `$PATH`, so Docker can find it.
+
+## Usage
+
+### With the Docker Engine
+
+Set the `credsStore` option in your `.docker/config.json` file with the suffix of the program you want to use. For instance, set it to `osxkeychain` if you want to use `docker-credential-osxkeychain`.
+
+```json
+{
+ "credsStore": "osxkeychain"
+}
+```
+
+### With other command line applications
+
+The sub-package [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) includes
+functions to call external programs from your own command line applications.
+
+There are three things you need to know if you need to interact with a helper:
+
+1. The name of the program to execute, for instance `docker-credential-osxkeychain`.
+2. The server address to identify the credentials, for instance `https://example.com`.
+3. The username and secret to store, when you want to store credentials.
+
+You can see examples of each function in the [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) documentation.
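+
+As a rough sketch (the program name and server address are examples; see the package documentation for the authoritative API), retrieving stored credentials looks like this:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/docker/docker-credential-helpers/client"
+)
+
+func main() {
+	p := client.NewShellProgramFunc("docker-credential-osxkeychain")
+	creds, err := client.Get(p, "https://example.com")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("username:", creds.Username)
+}
+```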
+
+### Available programs
+
+1. osxkeychain: Provides a helper to use the OS X keychain as credentials store.
+2. secretservice: Provides a helper to use the D-Bus secret service as credentials store.
+3. wincred: Provides a helper to use Windows credentials manager as store.
+4. pass: Provides a helper to use `pass` as credentials store.
+
+#### Note
+
+`pass` needs to be configured for `docker-credential-pass` to work properly.
+It must be initialized with a `gpg2` key ID. Make sure your GPG key exists in the `gpg2` keyring, as `pass` uses `gpg2` instead of the regular `gpg`.
+
+## Development
+
+A credential helper can be any program that can read values from the standard input. We use the first argument in the command line to differentiate the kind of command to execute. There are four valid values:
+
+- `store`: Adds credentials to the keychain. The payload in the standard input is a JSON document with `ServerURL`, `Username` and `Secret`.
+- `get`: Retrieves credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
+- `erase`: Removes credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
+- `list`: Lists stored credentials. There is no standard input payload.
+
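+For example, a `store` command reads a JSON document like the following on its standard input (an illustrative payload, not taken from the repository):
+
+```json
+{
+  "ServerURL": "https://example.com",
+  "Username": "david",
+  "Secret": "my-secret"
+}
+```
+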
+This repository also includes libraries to implement new credentials programs in Go. Adding a new helper program is pretty easy. You can see how the OS X keychain helper works in the [osxkeychain](osxkeychain) directory.
+
+1. Implement the interface `credentials.Helper` in `YOUR_PACKAGE/YOUR_PACKAGE_$GOOS.go`
+2. Create a main program in `YOUR_PACKAGE/cmd/main_$GOOS.go`.
+3. Add make tasks to build your program and run tests.
+
+## License
+
+MIT. See [LICENSE](LICENSE) for more information.
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
new file mode 100644
index 000000000..d1d0434cb
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -0,0 +1,121 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker-credential-helpers/credentials"
+)
+
+// isValidCredsMessage checks whether 'msg' contains an invalid-credentials
+// error message. It returns nil if the message is free of such errors, or the
+// matching error otherwise; possible error values are
+// errCredentialsMissingServerURL and errCredentialsMissingUsername.
+func isValidCredsMessage(msg string) error {
+ if credentials.IsCredentialsMissingServerURLMessage(msg) {
+ return credentials.NewErrCredentialsMissingServerURL()
+ }
+
+ if credentials.IsCredentialsMissingUsernameMessage(msg) {
+ return credentials.NewErrCredentialsMissingUsername()
+ }
+
+ return nil
+}
+
+// Store uses an external program to save credentials.
+func Store(program ProgramFunc, creds *credentials.Credentials) error {
+ cmd := program("store")
+
+ buffer := new(bytes.Buffer)
+ if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+ return err
+ }
+ cmd.Input(buffer)
+
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+ }
+
+ return nil
+}
+
+// Get executes an external program to get the credentials from a native store.
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
+ cmd := program("get")
+ cmd.Input(strings.NewReader(serverURL))
+
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if credentials.IsErrCredentialsNotFoundMessage(t) {
+ return nil, credentials.NewErrCredentialsNotFound()
+ }
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
+ }
+
+ resp := &credentials.Credentials{
+ ServerURL: serverURL,
+ }
+
+ if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+// Erase executes a program to remove the server credentials from the native store.
+func Erase(program ProgramFunc, serverURL string) error {
+ cmd := program("erase")
+ cmd.Input(strings.NewReader(serverURL))
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
+ }
+
+ return nil
+}
+
+// List executes a program to list server credentials in the native store.
+func List(program ProgramFunc) (map[string]string, error) {
+ cmd := program("list")
+ cmd.Input(strings.NewReader("unused"))
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
+ }
+
+ var resp map[string]string
+ if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go
new file mode 100644
index 000000000..8da334306
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+)
+
+// Program is an interface to execute external programs.
+type Program interface {
+ Output() ([]byte, error)
+ Input(in io.Reader)
+}
+
+// ProgramFunc is a type of function that initializes programs based on arguments.
+type ProgramFunc func(args ...string) Program
+
+// NewShellProgramFunc creates programs that are executed in a Shell.
+func NewShellProgramFunc(name string) ProgramFunc {
+ return NewShellProgramFuncWithEnv(name, nil)
+}
+
+// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables
+func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
+ return func(args ...string) Program {
+ return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
+ }
+}
+
+func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
+ programCmd := exec.Command(commandName, args...)
+ programCmd.Env = os.Environ()
+ if env != nil {
+ for k, v := range *env {
+ programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
+ }
+ }
+ programCmd.Stderr = os.Stderr
+ return programCmd
+}
+
+// Shell invokes shell commands to talk with a remote credentials helper.
+type Shell struct {
+ cmd *exec.Cmd
+}
+
+// Output returns responses from the remote credentials helper.
+func (s *Shell) Output() ([]byte, error) {
+ return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+ s.cmd.Stdin = in
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 000000000..da8b594e7
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,186 @@
+package credentials
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+ ServerURL string
+ Username string
+ Secret string
+}
+
+// isValid checks the integrity of a Credentials object, ensuring that it lacks
+// neither a server URL nor a username.
+// It returns whether the credentials are valid and, when they are not, the matching error.
+// Possible error values are errCredentialsMissingServerURL and errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+ if len(c.ServerURL) == 0 {
+ return false, NewErrCredentialsMissingServerURL()
+ }
+
+ if len(c.Username) == 0 {
+ return false, NewErrCredentialsMissingUsername()
+ }
+
+ return true, nil
+}
+
+// CredsLabel holds the label under which Docker credentials are stored in credentials stores that allow labelling.
+// That label makes it possible to filter out non-Docker credentials at lookup/search time in the macOS keychain,
+// Windows credentials manager and Linux libsecret. The default value is "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+ CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+ var err error
+ if len(os.Args) != 2 {
+ err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+ }
+
+ if err == nil {
+ err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stdout, "%v\n", err)
+ os.Exit(1)
+ }
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+ switch key {
+ case "store":
+ return Store(helper, in)
+ case "get":
+ return Get(helper, in, out)
+ case "erase":
+ return Erase(helper, in)
+ case "list":
+ return List(helper, out)
+ case "version":
+ return PrintVersion(out)
+ }
+ return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+ scanner := bufio.NewScanner(reader)
+
+ buffer := new(bytes.Buffer)
+ for scanner.Scan() {
+ buffer.Write(scanner.Bytes())
+ }
+
+ if err := scanner.Err(); err != nil && err != io.EOF {
+ return err
+ }
+
+ var creds Credentials
+ if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+ return err
+ }
+
+ if ok, err := creds.isValid(); !ok {
+ return err
+ }
+
+ return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server url.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
+func Get(helper Helper, reader io.Reader, writer io.Writer) error {
+ scanner := bufio.NewScanner(reader)
+
+ buffer := new(bytes.Buffer)
+ for scanner.Scan() {
+ buffer.Write(scanner.Bytes())
+ }
+
+ if err := scanner.Err(); err != nil && err != io.EOF {
+ return err
+ }
+
+ serverURL := strings.TrimSpace(buffer.String())
+ if len(serverURL) == 0 {
+ return NewErrCredentialsMissingServerURL()
+ }
+
+ username, secret, err := helper.Get(serverURL)
+ if err != nil {
+ return err
+ }
+
+ resp := Credentials{
+ ServerURL: serverURL,
+ Username: username,
+ Secret: secret,
+ }
+
+ buffer.Reset()
+ if err := json.NewEncoder(buffer).Encode(resp); err != nil {
+ return err
+ }
+
+ fmt.Fprint(writer, buffer.String())
+ return nil
+}
+
+// Erase removes credentials from the store.
+// The reader must contain the server URL to remove.
+func Erase(helper Helper, reader io.Reader) error {
+ scanner := bufio.NewScanner(reader)
+
+ buffer := new(bytes.Buffer)
+ for scanner.Scan() {
+ buffer.Write(scanner.Bytes())
+ }
+
+ if err := scanner.Err(); err != nil && err != io.EOF {
+ return err
+ }
+
+ serverURL := strings.TrimSpace(buffer.String())
+ if len(serverURL) == 0 {
+ return NewErrCredentialsMissingServerURL()
+ }
+
+ return helper.Delete(serverURL)
+}
+
+// List writes the stored serverURLs and their associated usernames
+// to the writer as a JSON map.
+func List(helper Helper, writer io.Writer) error {
+ accts, err := helper.List()
+ if err != nil {
+ return err
+ }
+ return json.NewEncoder(writer).Encode(accts)
+}
+
+// PrintVersion outputs the current version.
+func PrintVersion(writer io.Writer) error {
+ fmt.Fprintln(writer, Version)
+ return nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
new file mode 100644
index 000000000..fe6a5aef4
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -0,0 +1,102 @@
+package credentials
+
+const (
+	// errCredentialsNotFoundMessage standardizes the not-found error message, so every
+	// helper returns the same message and docker can handle it properly.
+ errCredentialsNotFoundMessage = "credentials not found in native keychain"
+
+	// errCredentialsMissingServerURLMessage and errCredentialsMissingUsernameMessage
+	// standardize the messages for invalid credentials and credentials management operations.
+ errCredentialsMissingServerURLMessage = "no credentials server URL"
+ errCredentialsMissingUsernameMessage = "no credentials username"
+)
+
+// errCredentialsNotFound represents an error
+// raised when credentials are not in the store.
+type errCredentialsNotFound struct{}
+
+// Error returns the standard error message
+// for when the credentials are not in the store.
+func (errCredentialsNotFound) Error() string {
+ return errCredentialsNotFoundMessage
+}
+
+// NewErrCredentialsNotFound creates a new error
+// for when the credentials are not in the store.
+func NewErrCredentialsNotFound() error {
+ return errCredentialsNotFound{}
+}
+
+// IsErrCredentialsNotFound returns true if the error
+// was caused by not having a set of credentials in a store.
+func IsErrCredentialsNotFound(err error) bool {
+ _, ok := err.(errCredentialsNotFound)
+ return ok
+}
+
+// IsErrCredentialsNotFoundMessage returns true if the error
+// was caused by not having a set of credentials in a store.
+//
+// This function helps to check messages returned by an
+// external program via its standard output.
+func IsErrCredentialsNotFoundMessage(err string) bool {
+ return err == errCredentialsNotFoundMessage
+}
+
+// errCredentialsMissingServerURL represents an error raised
+// when the credentials object has no server URL or when no
+// server URL is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingServerURL struct{}
+
+func (errCredentialsMissingServerURL) Error() string {
+ return errCredentialsMissingServerURLMessage
+}
+
+// errCredentialsMissingUsername represents an error raised
+// when the credentials object has no username or when no
+// username is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingUsername struct{}
+
+func (errCredentialsMissingUsername) Error() string {
+ return errCredentialsMissingUsernameMessage
+}
+
+// NewErrCredentialsMissingServerURL creates a new error for
+// errCredentialsMissingServerURL.
+func NewErrCredentialsMissingServerURL() error {
+ return errCredentialsMissingServerURL{}
+}
+
+// NewErrCredentialsMissingUsername creates a new error for
+// errCredentialsMissingUsername.
+func NewErrCredentialsMissingUsername() error {
+ return errCredentialsMissingUsername{}
+}
+
+// IsCredentialsMissingServerURL returns true if the error
+// was an errCredentialsMissingServerURL.
+func IsCredentialsMissingServerURL(err error) bool {
+ _, ok := err.(errCredentialsMissingServerURL)
+ return ok
+}
+
+// IsCredentialsMissingServerURLMessage checks for an
+// errCredentialsMissingServerURL in the error message.
+func IsCredentialsMissingServerURLMessage(err string) bool {
+ return err == errCredentialsMissingServerURLMessage
+}
+
+// IsCredentialsMissingUsername returns true if the error
+// was an errCredentialsMissingUsername.
+func IsCredentialsMissingUsername(err error) bool {
+ _, ok := err.(errCredentialsMissingUsername)
+ return ok
+}
+
+// IsCredentialsMissingUsernameMessage checks for an
+// errCredentialsMissingUsername in the error message.
+func IsCredentialsMissingUsernameMessage(err string) bool {
+ return err == errCredentialsMissingUsernameMessage
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
new file mode 100644
index 000000000..135acd254
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
@@ -0,0 +1,14 @@
+package credentials
+
+// Helper is the interface a credentials store helper must implement.
+type Helper interface {
+ // Add appends credentials to the store.
+ Add(*Credentials) error
+ // Delete removes credentials from the store.
+ Delete(serverURL string) error
+ // Get retrieves credentials from the store.
+ // It returns username and secret as strings.
+ Get(serverURL string) (string, string, error)
+ // List returns the stored serverURLs and their associated usernames.
+ List() (map[string]string, error)
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
new file mode 100644
index 000000000..033a5fee5
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
@@ -0,0 +1,4 @@
+package credentials
+
+// Version holds a string describing the current version
+const Version = "0.6.0"
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
new file mode 100644
index 000000000..f84d61ee5
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
@@ -0,0 +1,228 @@
+#include "osxkeychain_darwin.h"
+#include <CoreFoundation/CoreFoundation.h>
+#include <Foundation/NSValue.h>
+#include <stdio.h>
+#include <string.h>
+
+char *get_error(OSStatus status) {
+ char *buf = malloc(128);
+ CFStringRef str = SecCopyErrorMessageString(status, NULL);
+ int success = CFStringGetCString(str, buf, 128, kCFStringEncodingUTF8);
+ if (!success) {
+ strncpy(buf, "Unknown error", 128);
+ }
+ return buf;
+}
+
+char *keychain_add(struct Server *server, char *label, char *username, char *secret) {
+ SecKeychainItemRef item;
+
+ OSStatus status = SecKeychainAddInternetPassword(
+ NULL,
+ strlen(server->host), server->host,
+ 0, NULL,
+ strlen(username), username,
+ strlen(server->path), server->path,
+ server->port,
+ server->proto,
+ kSecAuthenticationTypeDefault,
+ strlen(secret), secret,
+ &item
+ );
+
+ if (status) {
+ return get_error(status);
+ }
+
+ SecKeychainAttribute attribute;
+ SecKeychainAttributeList attrs;
+ attribute.tag = kSecLabelItemAttr;
+ attribute.data = label;
+ attribute.length = strlen(label);
+ attrs.count = 1;
+ attrs.attr = &attribute;
+
+ status = SecKeychainItemModifyContent(item, &attrs, 0, NULL);
+
+ if (status) {
+ return get_error(status);
+ }
+
+ return NULL;
+}
+
+char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret) {
+ char *tmp;
+ SecKeychainItemRef item;
+
+ OSStatus status = SecKeychainFindInternetPassword(
+ NULL,
+ strlen(server->host), server->host,
+ 0, NULL,
+ 0, NULL,
+ strlen(server->path), server->path,
+ server->port,
+ server->proto,
+ kSecAuthenticationTypeDefault,
+ secret_l, (void **)&tmp,
+ &item);
+
+ if (status) {
+ return get_error(status);
+ }
+
+ *secret = strdup(tmp);
+ SecKeychainItemFreeContent(NULL, tmp);
+
+ SecKeychainAttributeList list;
+ SecKeychainAttribute attr;
+
+ list.count = 1;
+ list.attr = &attr;
+ attr.tag = kSecAccountItemAttr;
+
+ status = SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL);
+ if (status) {
+ return get_error(status);
+ }
+
+ *username = strdup(attr.data);
+ *username_l = attr.length;
+ SecKeychainItemFreeContent(&list, NULL);
+
+ return NULL;
+}
+
+char *keychain_delete(struct Server *server) {
+ SecKeychainItemRef item;
+
+ OSStatus status = SecKeychainFindInternetPassword(
+ NULL,
+ strlen(server->host), server->host,
+ 0, NULL,
+ 0, NULL,
+ strlen(server->path), server->path,
+ server->port,
+ server->proto,
+ kSecAuthenticationTypeDefault,
+ 0, NULL,
+ &item);
+
+ if (status) {
+ return get_error(status);
+ }
+
+ status = SecKeychainItemDelete(item);
+ if (status) {
+ return get_error(status);
+ }
+ return NULL;
+}
+
+char * CFStringToCharArr(CFStringRef aString) {
+ if (aString == NULL) {
+ return NULL;
+ }
+ CFIndex length = CFStringGetLength(aString);
+ CFIndex maxSize =
+ CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
+ char *buffer = (char *)malloc(maxSize);
+ if (CFStringGetCString(aString, buffer, maxSize,
+ kCFStringEncodingUTF8)) {
+ return buffer;
+ }
+ return NULL;
+}
+
+char *keychain_list(char *credsLabel, char *** paths, char *** accts, unsigned int *list_l) {
+ CFStringRef credsLabelCF = CFStringCreateWithCString(NULL, credsLabel, kCFStringEncodingUTF8);
+ CFMutableDictionaryRef query = CFDictionaryCreateMutable (NULL, 1, NULL, NULL);
+ CFDictionaryAddValue(query, kSecClass, kSecClassInternetPassword);
+ CFDictionaryAddValue(query, kSecReturnAttributes, kCFBooleanTrue);
+ CFDictionaryAddValue(query, kSecMatchLimit, kSecMatchLimitAll);
+ CFDictionaryAddValue(query, kSecAttrLabel, credsLabelCF);
+  // Use this query dictionary for the search below.
+ CFTypeRef result= NULL;
+ OSStatus status = SecItemCopyMatching(
+ query,
+ &result);
+
+ CFRelease(credsLabelCF);
+
+  // Run the search and store the results in result.
+ if (status) {
+ return get_error(status);
+ }
+ CFIndex numKeys = CFArrayGetCount(result);
+ *paths = (char **) malloc((int)sizeof(char *)*numKeys);
+ *accts = (char **) malloc((int)sizeof(char *)*numKeys);
+  // result is of type CFArray.
+ for(CFIndex i=0; i<numKeys; i++) {
+ CFDictionaryRef currKey = CFArrayGetValueAtIndex(result,i);
+
+ CFStringRef protocolTmp = CFDictionaryGetValue(currKey, CFSTR("ptcl"));
+ if (protocolTmp != NULL) {
+ CFStringRef protocolStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), protocolTmp);
+ if (CFStringCompare(protocolStr, CFSTR("htps"), 0) == kCFCompareEqualTo) {
+ protocolTmp = CFSTR("https://");
+ }
+ else {
+ protocolTmp = CFSTR("http://");
+ }
+ CFRelease(protocolStr);
+ }
+ else {
+ char * path = "0";
+ char * acct = "0";
+      // Allocate one extra byte for the terminating NUL so these strings can
+      // be read back safely (e.g. by C.GoString on the Go side).
+      (*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)+1));
+      memcpy((*paths)[i], path, sizeof(char)*(strlen(path)+1));
+      (*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)+1));
+      memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)+1));
+ continue;
+ }
+
+ CFMutableStringRef str = CFStringCreateMutableCopy(NULL, 0, protocolTmp);
+ CFStringRef serverTmp = CFDictionaryGetValue(currKey, CFSTR("srvr"));
+ if (serverTmp != NULL) {
+ CFStringAppend(str, serverTmp);
+ }
+
+ CFStringRef pathTmp = CFDictionaryGetValue(currKey, CFSTR("path"));
+ if (pathTmp != NULL) {
+ CFStringAppend(str, pathTmp);
+ }
+
+ const NSNumber * portTmp = CFDictionaryGetValue(currKey, CFSTR("port"));
+ if (portTmp != NULL && portTmp.integerValue != 0) {
+ CFStringRef portStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), portTmp);
+ CFStringAppend(str, CFSTR(":"));
+ CFStringAppend(str, portStr);
+ CFRelease(portStr);
+ }
+
+ CFStringRef acctTmp = CFDictionaryGetValue(currKey, CFSTR("acct"));
+ if (acctTmp == NULL) {
+ acctTmp = CFSTR("account not defined");
+ }
+
+ char * path = CFStringToCharArr(str);
+ char * acct = CFStringToCharArr(acctTmp);
+
+      // We now have all we need: the username and server name. Export these to Go.
+ (*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)+1));
+ memcpy((*paths)[i], path, sizeof(char)*(strlen(path)+1));
+ (*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)+1));
+ memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)+1));
+
+ CFRelease(str);
+ }
+ *list_l = (int)numKeys;
+ return NULL;
+}
+
+void freeListData(char *** data, unsigned int length) {
+ for(int i=0; i<length; i++) {
+ free((*data)[i]);
+ }
+ free(*data);
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go
new file mode 100644
index 000000000..439126761
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go
@@ -0,0 +1,196 @@
+package osxkeychain
+
+/*
+#cgo CFLAGS: -x objective-c -mmacosx-version-min=10.10
+#cgo LDFLAGS: -framework Security -framework Foundation -mmacosx-version-min=10.10
+
+#include "osxkeychain_darwin.h"
+#include <stdlib.h>
+*/
+import "C"
+import (
+ "errors"
+ "net/url"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ "github.com/docker/docker-credential-helpers/credentials"
+)
+
+// errCredentialsNotFound is the specific error message returned by OS X
+// when the credentials are not in the keychain.
+const errCredentialsNotFound = "The specified item could not be found in the keychain."
+
+// Osxkeychain handles secrets using the OS X Keychain as store.
+type Osxkeychain struct{}
+
+// Add adds new credentials to the keychain.
+func (h Osxkeychain) Add(creds *credentials.Credentials) error {
+ h.Delete(creds.ServerURL)
+
+ s, err := splitServer(creds.ServerURL)
+ if err != nil {
+ return err
+ }
+ defer freeServer(s)
+
+ label := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(label))
+ username := C.CString(creds.Username)
+ defer C.free(unsafe.Pointer(username))
+ secret := C.CString(creds.Secret)
+ defer C.free(unsafe.Pointer(secret))
+
+ errMsg := C.keychain_add(s, label, username, secret)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ return errors.New(C.GoString(errMsg))
+ }
+
+ return nil
+}
+
+// Delete removes credentials from the keychain.
+func (h Osxkeychain) Delete(serverURL string) error {
+ s, err := splitServer(serverURL)
+ if err != nil {
+ return err
+ }
+ defer freeServer(s)
+
+ errMsg := C.keychain_delete(s)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ return errors.New(C.GoString(errMsg))
+ }
+
+ return nil
+}
+
+// Get returns the username and secret to use for a given registry server URL.
+func (h Osxkeychain) Get(serverURL string) (string, string, error) {
+ s, err := splitServer(serverURL)
+ if err != nil {
+ return "", "", err
+ }
+ defer freeServer(s)
+
+ var usernameLen C.uint
+ var username *C.char
+ var secretLen C.uint
+ var secret *C.char
+ defer C.free(unsafe.Pointer(username))
+ defer C.free(unsafe.Pointer(secret))
+
+ errMsg := C.keychain_get(s, &usernameLen, &username, &secretLen, &secret)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ goMsg := C.GoString(errMsg)
+ if goMsg == errCredentialsNotFound {
+ return "", "", credentials.NewErrCredentialsNotFound()
+ }
+
+ return "", "", errors.New(goMsg)
+ }
+
+ user := C.GoStringN(username, C.int(usernameLen))
+ pass := C.GoStringN(secret, C.int(secretLen))
+ return user, pass, nil
+}
+
+// List returns the stored URLs and corresponding usernames.
+func (h Osxkeychain) List() (map[string]string, error) {
+ credsLabelC := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(credsLabelC))
+
+ var pathsC **C.char
+ defer C.free(unsafe.Pointer(pathsC))
+ var acctsC **C.char
+ defer C.free(unsafe.Pointer(acctsC))
+ var listLenC C.uint
+ errMsg := C.keychain_list(credsLabelC, &pathsC, &acctsC, &listLenC)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ goMsg := C.GoString(errMsg)
+ return nil, errors.New(goMsg)
+ }
+
+ defer C.freeListData(&pathsC, listLenC)
+ defer C.freeListData(&acctsC, listLenC)
+
+	listLen := int(listLenC)
+ pathTmp := (*[1 << 30]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
+ acctTmp := (*[1 << 30]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
+	// Copy the C string arrays into Go, skipping entries irrelevant to the credentials helper.
+ resp := make(map[string]string)
+ for i := 0; i < listLen; i++ {
+ if C.GoString(pathTmp[i]) == "0" {
+ continue
+ }
+ resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
+ }
+ return resp, nil
+}
+
+func splitServer(serverURL string) (*C.struct_Server, error) {
+ u, err := parseURL(serverURL)
+ if err != nil {
+ return nil, err
+ }
+
+ proto := C.kSecProtocolTypeHTTPS
+ if u.Scheme == "http" {
+ proto = C.kSecProtocolTypeHTTP
+ }
+ var port int
+ p := getPort(u)
+ if p != "" {
+ port, err = strconv.Atoi(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &C.struct_Server{
+ proto: C.SecProtocolType(proto),
+ host: C.CString(getHostname(u)),
+ port: C.uint(port),
+ path: C.CString(u.Path),
+ }, nil
+}
+
+func freeServer(s *C.struct_Server) {
+ C.free(unsafe.Pointer(s.host))
+ C.free(unsafe.Pointer(s.path))
+}
+
+// parseURL parses and validates a given serverURL to an url.URL, and
+// returns an error if validation failed. Querystring parameters are
+// omitted in the resulting URL, because they are not used in the helper.
+//
+// If serverURL does not have a valid scheme, `//` is used as scheme
+// before parsing. This prevents the hostname being used as path,
+// and the credentials being stored without host.
+func parseURL(serverURL string) (*url.URL, error) {
+ // Check if serverURL has a scheme, otherwise add `//` as scheme.
+ if !strings.Contains(serverURL, "://") && !strings.HasPrefix(serverURL, "//") {
+ serverURL = "//" + serverURL
+ }
+
+ u, err := url.Parse(serverURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Scheme != "" && u.Scheme != "https" && u.Scheme != "http" {
+ return nil, errors.New("unsupported scheme: " + u.Scheme)
+ }
+ if getHostname(u) == "" {
+ return nil, errors.New("no hostname in URL")
+ }
+
+ u.RawQuery = ""
+ return u, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
new file mode 100644
index 000000000..c54e7d728
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
@@ -0,0 +1,14 @@
+#include <Security/Security.h>
+
+struct Server {
+ SecProtocolType proto;
+ char *host;
+ char *path;
+ unsigned int port;
+};
+
+char *keychain_add(struct Server *server, char *label, char *username, char *secret);
+char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret);
+char *keychain_delete(struct Server *server);
+char *keychain_list(char *credsLabel, char *** data, char *** accts, unsigned int *list_l);
+void freeListData(char *** data, unsigned int length); \ No newline at end of file
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
new file mode 100644
index 000000000..0b7297d2f
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
@@ -0,0 +1,13 @@
+//+build go1.8
+
+package osxkeychain
+
+import "net/url"
+
+func getHostname(u *url.URL) string {
+ return u.Hostname()
+}
+
+func getPort(u *url.URL) string {
+ return u.Port()
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
new file mode 100644
index 000000000..bdf9b7b00
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
@@ -0,0 +1,41 @@
+//+build !go1.8
+
+package osxkeychain
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getHostname(u *url.URL) string {
+ return stripPort(u.Host)
+}
+
+func getPort(u *url.URL) string {
+ return portOnly(u.Host)
+}
+
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
new file mode 100644
index 000000000..35dea92da
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
@@ -0,0 +1,162 @@
+#include <string.h>
+#include <stdlib.h>
+#include "secretservice_linux.h"
+
+const SecretSchema *docker_get_schema(void)
+{
+ static const SecretSchema docker_schema = {
+ "io.docker.Credentials", SECRET_SCHEMA_NONE,
+ {
+ { "label", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "server", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "username", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "docker_cli", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "NULL", 0 },
+ }
+ };
+ return &docker_schema;
+}
+
+GError *add(char *label, char *server, char *username, char *secret) {
+ GError *err = NULL;
+
+ secret_password_store_sync (DOCKER_SCHEMA, SECRET_COLLECTION_DEFAULT,
+ server, secret, NULL, &err,
+ "label", label,
+ "server", server,
+ "username", username,
+ "docker_cli", "1",
+ NULL);
+ return err;
+}
+
+GError *delete(char *server) {
+ GError *err = NULL;
+
+ secret_password_clear_sync(DOCKER_SCHEMA, NULL, &err,
+ "server", server,
+ "docker_cli", "1",
+ NULL);
+ if (err != NULL)
+ return err;
+ return NULL;
+}
+
+char *get_attribute(const char *attribute, SecretItem *item) {
+ GHashTable *attributes;
+ GHashTableIter iter;
+ gchar *value, *key;
+
+ attributes = secret_item_get_attributes(item);
+ g_hash_table_iter_init(&iter, attributes);
+ while (g_hash_table_iter_next(&iter, (void **)&key, (void **)&value)) {
+ if (strncmp(key, attribute, strlen(key)) == 0)
+ return (char *)value;
+ }
+ g_hash_table_unref(attributes);
+ return NULL;
+}
+
+GError *get(char *server, char **username, char **secret) {
+ GError *err = NULL;
+ GHashTable *attributes;
+ SecretService *service;
+ GList *items, *l;
+ SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK;
+ SecretValue *secretValue;
+ gsize length;
+ gchar *value;
+
+ attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
+ g_hash_table_insert(attributes, g_strdup("server"), g_strdup(server));
+ g_hash_table_insert(attributes, g_strdup("docker_cli"), g_strdup("1"));
+
+ service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err);
+ if (err == NULL) {
+ items = secret_service_search_sync(service, DOCKER_SCHEMA, attributes, flags, NULL, &err);
+ if (err == NULL) {
+ for (l = items; l != NULL; l = g_list_next(l)) {
+ value = secret_item_get_schema_name(l->data);
+ if (strncmp(value, "io.docker.Credentials", strlen(value)) != 0) {
+ g_free(value);
+ continue;
+ }
+ g_free(value);
+ secretValue = secret_item_get_secret(l->data);
+ if (secret != NULL) {
+ *secret = strdup(secret_value_get(secretValue, &length));
+ secret_value_unref(secretValue);
+ }
+ *username = get_attribute("username", l->data);
+ }
+ g_list_free_full(items, g_object_unref);
+ }
+ g_object_unref(service);
+ }
+ g_hash_table_unref(attributes);
+ if (err != NULL) {
+ return err;
+ }
+ return NULL;
+}
+
+GError *list(char *ref_label, char *** paths, char *** accts, unsigned int *list_l) {
+ GList *items;
+ GError *err = NULL;
+ SecretService *service;
+ SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK;
+ GHashTable *attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
+
+ // List credentials with the right label only
+ g_hash_table_insert(attributes, g_strdup("label"), g_strdup(ref_label));
+
+ service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err);
+ if (err != NULL) {
+ return err;
+ }
+
+ items = secret_service_search_sync(service, NULL, attributes, flags, NULL, &err);
+ int numKeys = g_list_length(items);
+ if (err != NULL) {
+ return err;
+ }
+
+ char **tmp_paths = (char **) calloc(1,(int)sizeof(char *)*numKeys);
+ char **tmp_accts = (char **) calloc(1,(int)sizeof(char *)*numKeys);
+
+ // items now contains our keys from the gnome keyring
+ // we will now put it in our two lists to return it to go
+ GList *current;
+ int listNumber = 0;
+ for(current = items; current!=NULL; current = current->next) {
+ char *pathTmp = secret_item_get_label(current->data);
+ // you cannot have a key without a label in the gnome keyring
+ char *acctTmp = get_attribute("username",current->data);
+ if (acctTmp==NULL) {
+ acctTmp = "account not defined";
+ }
+
+ tmp_paths[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(pathTmp)+1));
+ tmp_accts[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(acctTmp)+1));
+
+ memcpy(tmp_paths[listNumber], pathTmp, sizeof(char)*(strlen(pathTmp)+1));
+ memcpy(tmp_accts[listNumber], acctTmp, sizeof(char)*(strlen(acctTmp)+1));
+
+ listNumber = listNumber + 1;
+ }
+
+ *paths = (char **) realloc(tmp_paths, (int)sizeof(char *)*listNumber);
+ *accts = (char **) realloc(tmp_accts, (int)sizeof(char *)*listNumber);
+
+ *list_l = listNumber;
+
+ return NULL;
+}
+
+void freeListData(char *** data, unsigned int length) {
+ int i;
+ for(i=0; i<length; i++) {
+ free((*data)[i]);
+ }
+ free(*data);
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go
new file mode 100644
index 000000000..95a1310b6
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go
@@ -0,0 +1,118 @@
+package secretservice
+
+/*
+#cgo pkg-config: libsecret-1
+
+#include "secretservice_linux.h"
+#include <stdlib.h>
+*/
+import "C"
+import (
+ "errors"
+ "unsafe"
+
+ "github.com/docker/docker-credential-helpers/credentials"
+)
+
+// Secretservice handles secrets using Linux secret-service as a store.
+type Secretservice struct{}
+
+// Add adds new credentials to the keychain.
+func (h Secretservice) Add(creds *credentials.Credentials) error {
+ if creds == nil {
+ return errors.New("missing credentials")
+ }
+ credsLabel := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(credsLabel))
+ server := C.CString(creds.ServerURL)
+ defer C.free(unsafe.Pointer(server))
+ username := C.CString(creds.Username)
+ defer C.free(unsafe.Pointer(username))
+ secret := C.CString(creds.Secret)
+ defer C.free(unsafe.Pointer(secret))
+
+ if err := C.add(credsLabel, server, username, secret); err != nil {
+ defer C.g_error_free(err)
+ errMsg := (*C.char)(unsafe.Pointer(err.message))
+ return errors.New(C.GoString(errMsg))
+ }
+ return nil
+}
+
+// Delete removes credentials from the store.
+func (h Secretservice) Delete(serverURL string) error {
+ if serverURL == "" {
+ return errors.New("missing server url")
+ }
+ server := C.CString(serverURL)
+ defer C.free(unsafe.Pointer(server))
+
+ if err := C.delete(server); err != nil {
+ defer C.g_error_free(err)
+ errMsg := (*C.char)(unsafe.Pointer(err.message))
+ return errors.New(C.GoString(errMsg))
+ }
+ return nil
+}
+
+// Get returns the username and secret to use for a given registry server URL.
+func (h Secretservice) Get(serverURL string) (string, string, error) {
+ if serverURL == "" {
+ return "", "", errors.New("missing server url")
+ }
+ var username *C.char
+ defer C.free(unsafe.Pointer(username))
+ var secret *C.char
+ defer C.free(unsafe.Pointer(secret))
+ server := C.CString(serverURL)
+ defer C.free(unsafe.Pointer(server))
+
+ err := C.get(server, &username, &secret)
+ if err != nil {
+ defer C.g_error_free(err)
+ errMsg := (*C.char)(unsafe.Pointer(err.message))
+ return "", "", errors.New(C.GoString(errMsg))
+ }
+ user := C.GoString(username)
+ pass := C.GoString(secret)
+ if pass == "" {
+ return "", "", credentials.NewErrCredentialsNotFound()
+ }
+ return user, pass, nil
+}
+
+// List returns the stored URLs and corresponding usernames for a given credentials label
+func (h Secretservice) List() (map[string]string, error) {
+ credsLabelC := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(credsLabelC))
+
+ var pathsC **C.char
+ defer C.free(unsafe.Pointer(pathsC))
+ var acctsC **C.char
+ defer C.free(unsafe.Pointer(acctsC))
+ var listLenC C.uint
+ err := C.list(credsLabelC, &pathsC, &acctsC, &listLenC)
+ if err != nil {
+ defer C.free(unsafe.Pointer(err))
+ return nil, errors.New("Error from list function in secretservice_linux.c likely due to error in secretservice library")
+ }
+ defer C.freeListData(&pathsC, listLenC)
+ defer C.freeListData(&acctsC, listLenC)
+
+ resp := make(map[string]string)
+
+ listLen := int(listLenC)
+ if listLen == 0 {
+ return resp, nil
+ }
+ // The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible
+ // with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 Byte on a 32-bit system
+ // and (2^29)*4 == math.MaxInt32 + 1. -- See issue golang/go#13656
+ pathTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
+ acctTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
+ for i := 0; i < listLen; i++ {
+ resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
new file mode 100644
index 000000000..a28179db3
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
@@ -0,0 +1,13 @@
+#define SECRET_WITH_UNSTABLE 1
+#define SECRET_API_SUBJECT_TO_CHANGE 1
+#include <libsecret/secret.h>
+
+const SecretSchema *docker_get_schema(void) G_GNUC_CONST;
+
+#define DOCKER_SCHEMA docker_get_schema()
+
+GError *add(char *label, char *server, char *username, char *secret);
+GError *delete(char *server);
+GError *get(char *server, char **username, char **secret);
+GError *list(char *label, char *** paths, char *** accts, unsigned int *list_l);
+void freeListData(char *** data, unsigned int length);
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
new file mode 100644
index 000000000..9c8e20ab8
--- /dev/null
+++ b/vendor/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2017 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
new file mode 100644
index 000000000..0c74e15b0
--- /dev/null
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2017 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md
new file mode 100644
index 000000000..533d7717d
--- /dev/null
+++ b/vendor/github.com/docker/docker/README.md
@@ -0,0 +1,90 @@
+### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects
+
+### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details
+
+The Moby Project
+================
+
+![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
+
+Moby is an open-source project created by Docker to advance the software containerization movement.
+It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas.
+
+# Moby
+
+## Overview
+
+At the core of Moby is a framework to assemble specialized container systems.
+It provides:
+
+- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc.
+- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers.
+- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own.
+
+All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container.
+
+## Principles
+
+Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction.
+The guiding principles are:
+
+- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped out for different implementations.
+- Usable security: Moby will provide secure defaults without compromising usability.
+- Container centric: Moby is built with containers, for running containers.
+
+With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use, and build and deploy it easily.
+
+Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime.
+
+## Audience
+
+Moby is recommended for anyone who wants to assemble a container-based system. This includes:
+
+- Hackers who want to customize or patch their Docker build
+- System engineers or integrators building a container system
+- Infrastructure providers looking to adapt existing container systems to their environment
+- Container enthusiasts who want to experiment with the latest container tech
+- Open-source developers looking to test their project in a variety of different systems
+- Anyone curious about Docker internals and how it’s built
+
+Moby is NOT recommended for:
+
+- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
+- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
+- Anyone curious about containers and looking for an easy way to learn. We recommend the [docker.com](https://www.docker.com/) website instead.
+
+# Transitioning to Moby
+
+Docker is transitioning all of its open source collaborations to the Moby project going forward.
+During the transition, all open source activity should continue as usual.
+
+We are proposing the following list of changes:
+
+- splitting up the engine into more open components
+- removing the docker UI, SDK, etc. to keep them in the Docker org
+- clarifying that the project is not limited to the engine, but to the assembly of all the individual components of the Docker platform
+- open-source new tools & components which we currently use to assemble the Docker product, but could benefit the community
+- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor)
+
+-----
+
+Legal
+=====
+
+*Brought to you courtesy of our legal counsel. For more context,
+please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
+
+Use and transfer of Moby may be subject to certain restrictions by the
+United States and other governments.
+
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+
+Licensing
+=========
+Moby is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
+license text.
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 000000000..bb8813252
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
+
+It consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
+- `daemon/` The daemon, which serves the API.
+
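+For example, a third-party Go program can list containers with the Go client
+(a minimal sketch, assuming the `client` and `types` packages as vendored here):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	// NewEnvClient configures the client from DOCKER_HOST and related variables.
+	cli, err := client.NewEnvClient()
+	if err != nil {
+		panic(err)
+	}
+	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
+	if err != nil {
+		panic(err)
+	}
+	for _, c := range containers {
+		fmt.Println(c.ID, c.Image, c.Status)
+	}
+}
+```
+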
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work-in-progress.)
+3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines re-usable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful while you are making edits, to ensure you are doing the right thing.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
new file mode 100644
index 000000000..6e462aeda
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -0,0 +1,65 @@
+package api
+
+import (
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/system"
+ "github.com/docker/libtrust"
+)
+
+// Common constants for daemon and client.
+const (
+ // DefaultVersion of the current REST API
+ DefaultVersion string = "1.32"
+
+ // NoBaseImageSpecifier is the symbol used by the FROM
+ // command to specify that no base image is to be used.
+ NoBaseImageSpecifier string = "scratch"
+)
+
+// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
+// otherwise generates a new one
+func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
+ err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
+ if err != nil {
+ return nil, err
+ }
+ trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
+ if err == libtrust.ErrKeyFileDoesNotExist {
+ trustKey, err = libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("Error generating key: %s", err)
+ }
+ encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
+ if err != nil {
+ return nil, fmt.Errorf("Error serializing key: %s", err)
+ }
+ if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
+ return nil, fmt.Errorf("Error saving key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
+ }
+ return trustKey, nil
+}
+
+func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
+ if ext == ".json" || ext == ".jwk" {
+ encoded, err = json.Marshal(key)
+ if err != nil {
+ return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
+ }
+ } else {
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
+ }
+ encoded = pem.EncodeToMemory(pemBlock)
+ }
+ return
+}
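+
+// Illustrative usage (not part of the original file; the key path below is
+// hypothetical):
+//
+//	key, err := api.LoadOrCreateTrustKey("/var/lib/docker/key.json")
+//	if err != nil {
+//		// the key could neither be loaded nor generated
+//	}
+//	_ = key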
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
new file mode 100644
index 000000000..081e61c45
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package api
+
+// MinVersion represents the minimum REST API version supported
+const MinVersion string = "1.12"
diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go
new file mode 100644
index 000000000..a6268a4ff
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common_windows.go
@@ -0,0 +1,8 @@
+package api
+
+// MinVersion represents the minimum REST API version supported
+// Technically the first daemon API version released on Windows is v1.25 in
+// engine version 1.13. However, some clients are explicitly using downlevel
+// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
+// Hence also allowing 1.24 on Windows.
+const MinVersion string = "1.24"
diff --git a/vendor/github.com/docker/docker/api/names.go b/vendor/github.com/docker/docker/api/names.go
new file mode 100644
index 000000000..f147d1f4c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/names.go
@@ -0,0 +1,9 @@
+package api
+
+import "regexp"
+
+// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
+const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
+
+// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
+var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
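+
+// Illustrative examples (not part of the original file):
+//
+//	RestrictedNamePattern.MatchString("web_1")    // true
+//	RestrictedNamePattern.MatchString("-badname") // false: must start with [a-zA-Z0-9]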
diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go
new file mode 100644
index 000000000..056af6b84
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/auth.go
@@ -0,0 +1,22 @@
+package types
+
+// AuthConfig contains authorization information for connecting to a Registry
+type AuthConfig struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Auth string `json:"auth,omitempty"`
+
+ // Email is an optional value associated with the username.
+ // This field is deprecated and will be removed in a later
+ // version of docker.
+ Email string `json:"email,omitempty"`
+
+ ServerAddress string `json:"serveraddress,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
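+
+// AuthConfig is typically JSON-marshalled and base64url-encoded to form the
+// X-Registry-Auth header. A minimal sketch (illustrative, not part of the
+// original file):
+//
+//	buf, _ := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
+//	header := base64.URLEncoding.EncodeToString(buf)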
diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
new file mode 100644
index 000000000..931ae10ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
@@ -0,0 +1,23 @@
+package blkiodev
+
+import "fmt"
+
+// WeightDevice is a structure that holds a device:weight pair
+type WeightDevice struct {
+ Path string
+ Weight uint16
+}
+
+func (w *WeightDevice) String() string {
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
+}
+
+// ThrottleDevice is a structure that holds a device:rate_per_second pair
+type ThrottleDevice struct {
+ Path string
+ Rate uint64
+}
+
+func (t *ThrottleDevice) String() string {
+ return fmt.Sprintf("%s:%d", t.Path, t.Rate)
+}
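+
+// Illustrative example (not part of the original file):
+// (&WeightDevice{Path: "/dev/sda", Weight: 500}).String() yields "/dev/sda:500",
+// the device:weight syntax used by flags such as --blkio-weight-device.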
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
new file mode 100644
index 000000000..18a1263f1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -0,0 +1,389 @@
+package types
+
+import (
+ "bufio"
+ "io"
+ "net"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ units "github.com/docker/go-units"
+)
+
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container
+type CheckpointCreateOptions struct {
+ CheckpointID string
+ CheckpointDir string
+ Exit bool
+}
+
+// CheckpointListOptions holds parameters to list checkpoints for a container
+type CheckpointListOptions struct {
+ CheckpointDir string
+}
+
+// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
+type CheckpointDeleteOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
+
+// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct {
+ Stream bool
+ Stdin bool
+ Stdout bool
+ Stderr bool
+ DetachKeys string
+ Logs bool
+}
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+type ContainerCommitOptions struct {
+ Reference string
+ Comment string
+ Author string
+ Changes []string
+ Pause bool
+ Config *container.Config
+}
+
+// ContainerExecInspect holds information returned by exec inspect.
+type ContainerExecInspect struct {
+ ExecID string
+ ContainerID string
+ Running bool
+ ExitCode int
+ Pid int
+}
+
+// ContainerListOptions holds parameters to list containers with.
+type ContainerListOptions struct {
+ Quiet bool
+ Size bool
+ All bool
+ Latest bool
+ Since string
+ Before string
+ Limit int
+ Filters filters.Args
+}
+
+// ContainerLogsOptions holds parameters to filter logs with.
+type ContainerLogsOptions struct {
+ ShowStdout bool
+ ShowStderr bool
+ Since string
+ Timestamps bool
+ Follow bool
+ Tail string
+ Details bool
+}
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+ RemoveVolumes bool
+ RemoveLinks bool
+ Force bool
+}
+
+// ContainerStartOptions holds parameters to start containers.
+type ContainerStartOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+ AllowOverwriteDirWithFile bool
+ CopyUIDGID bool
+}
+
+// EventsOptions holds parameters to filter events with.
+type EventsOptions struct {
+ Since string
+ Until string
+ Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+ Filters filters.Args
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+ Conn net.Conn
+ Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+ h.Conn.Close()
+}
+
+// CloseWriter is an interface implemented by connections
+// that can close their writing side to prevent further writes.
+type CloseWriter interface {
+ CloseWrite() error
+}
+
+// CloseWrite closes the hijacked connection for writing.
+func (h *HijackedResponse) CloseWrite() error {
+ if conn, ok := h.Conn.(CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
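+
+// A HijackedResponse is obtained, for example, from ContainerAttach. A minimal
+// sketch (illustrative; assumes a Go client `cli` and a container ID `id`):
+//
+//	resp, err := cli.ContainerAttach(ctx, id, types.ContainerAttachOptions{
+//		Stream: true, Stdout: true, Stderr: true,
+//	})
+//	if err == nil {
+//		defer resp.Close()
+//		io.Copy(os.Stdout, resp.Reader)
+//	}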
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+ Tags []string
+ SuppressOutput bool
+ RemoteContext string
+ NoCache bool
+ Remove bool
+ ForceRemove bool
+ PullParent bool
+ Isolation container.Isolation
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares int64
+ CPUQuota int64
+ CPUPeriod int64
+ Memory int64
+ MemorySwap int64
+ CgroupParent string
+ NetworkMode string
+ ShmSize int64
+ Dockerfile string
+ Ulimits []*units.Ulimit
+ // BuildArgs needs to be a *string instead of just a string so that
+ // we can tell the difference between "" (empty string) and no value
+ // at all (nil). See the parsing of buildArgs in
+ // api/server/router/build/build_routes.go for even more info.
+ BuildArgs map[string]*string
+ AuthConfigs map[string]AuthConfig
+ Context io.Reader
+ Labels map[string]string
+ // Squash squashes the resulting image's layers into the parent;
+ // the original image is preserved, and a new one is created from the
+ // parent with all the changes applied to a single layer
+ Squash bool
+ // CacheFrom specifies images that are used for matching cache. Images
+ // specified here do not need to have a valid parent chain to match cache.
+ CacheFrom []string
+ SecurityOpt []string
+ ExtraHosts []string // List of extra hosts
+ Target string
+ SessionID string
+
+ // TODO @jhowardmsft LCOW Support: This will require extending to include
+ // `Platform string`, but is omitted for now as it's hard-coded temporarily
+ // to avoid API changes.
+}
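+
+// The *string values in BuildArgs distinguish an unset build-arg from an
+// empty one. A minimal sketch (illustrative, not part of the original file):
+//
+//	empty := ""
+//	args := map[string]*string{
+//		"HTTP_PROXY": &empty, // explicitly set to ""
+//		"NO_PROXY":   nil,    // declared but given no value
+//	}
+//	opts := types.ImageBuildOptions{Dockerfile: "Dockerfile", BuildArgs: args}
+//	_ = opts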
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+ Body io.ReadCloser
+ OSType string
+}
+
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+}
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+ Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
+ SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+ Tag string // Tag is the name to tag this image with. This attribute is deprecated.
+ Message string // Message is the message to tag the image with
+ Changes []string // Changes are the raw changes to apply to this image
+}
+
+// ImageListOptions holds parameters to filter the list of images with.
+type ImageListOptions struct {
+ All bool
+ Filters filters.Args
+}
+
+// ImageLoadResponse returns information to the client about a load process.
+type ImageLoadResponse struct {
+ // Body must be closed to avoid a resource leak
+ Body io.ReadCloser
+ JSON bool
+}
+
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+ All bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ PrivilegeFunc RequestPrivilegeFunc
+}
+
+// RequestPrivilegeFunc is a function interface that
+// clients can supply to retry operations after
+// getting an authorization error.
+// This function returns the registry authentication
+// header value in base 64 format, or an error
+// if the privilege request fails.
+type RequestPrivilegeFunc func() (string, error)
+
+// ImagePushOptions holds information to push images.
+type ImagePushOptions ImagePullOptions
+
+// ImageRemoveOptions holds parameters to remove images.
+type ImageRemoveOptions struct {
+ Force bool
+ PruneChildren bool
+}
+
+// ImageSearchOptions holds parameters to search images with.
+type ImageSearchOptions struct {
+ RegistryAuth string
+ PrivilegeFunc RequestPrivilegeFunc
+ Filters filters.Args
+ Limit int
+}
+
+// ResizeOptions holds parameters to resize a tty.
+// It can be used to resize container ttys and
+// exec process ttys too.
+type ResizeOptions struct {
+ Height uint
+ Width uint
+}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+ Filters filters.Args
+}
+
+// NodeRemoveOptions holds parameters to remove nodes with.
+type NodeRemoveOptions struct {
+ Force bool
+}
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when creating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // QueryRegistry indicates whether creating the service requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceCreateResponse contains the information returned to a client
+// on the creation of a new service.
+type ServiceCreateResponse struct {
+ // ID is the ID of the created service.
+ ID string
+ // Warnings is a set of non-fatal warning messages to pass on to the user.
+ Warnings []string `json:",omitempty"`
+}
+
+// Values for RegistryAuthFrom in ServiceUpdateOptions
+const (
+ RegistryAuthFromSpec = "spec"
+ RegistryAuthFromPreviousSpec = "previous-spec"
+)
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+ // into this field. While it does open API users up to racy writes, most
+ // users may not need that level of consistency in practice.
+
+ // RegistryAuthFrom specifies where to find the registry authorization
+ // credentials if they are not given in EncodedRegistryAuth. Valid
+ // values are "spec" and "previous-spec".
+ RegistryAuthFrom string
+
+ // Rollback indicates whether a server-side rollback should be
+ // performed. When this is set, the provided spec will be ignored.
+ // The valid values are "previous" and "none". An empty value is the
+ // same as "none".
+ Rollback string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+ Filters filters.Args
+}
+
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+type ServiceInspectOptions struct {
+ InsertDefaults bool
+}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+ Filters filters.Args
+}
+
+// PluginRemoveOptions holds parameters to remove plugins.
+type PluginRemoveOptions struct {
+ Force bool
+}
+
+// PluginEnableOptions holds parameters to enable plugins.
+type PluginEnableOptions struct {
+ Timeout int
+}
+
+// PluginDisableOptions holds parameters to disable plugins.
+type PluginDisableOptions struct {
+ Force bool
+}
+
+// PluginInstallOptions holds parameters to install a plugin.
+type PluginInstallOptions struct {
+ Disabled bool
+ AcceptAllPermissions bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ RemoteRef string // RemoteRef is the plugin name on the registry
+ PrivilegeFunc RequestPrivilegeFunc
+ AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
+ Args []string
+}
+
+// SwarmUnlockKeyResponse contains the response for Engine API:
+// GET /swarm/unlockkey
+type SwarmUnlockKeyResponse struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// PluginCreateOptions holds all options for plugin create.
+type PluginCreateOptions struct {
+ RepoName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go
new file mode 100644
index 000000000..e4d2ce6e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/configs.go
@@ -0,0 +1,70 @@
+package types
+
+import (
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+)
+
+// configs holds structs used for internal communication between the
+// frontend (such as an http server) and the backend (such as the
+// docker daemon).
+
+// ContainerCreateConfig is the parameter set to ContainerCreate()
+type ContainerCreateConfig struct {
+ Name string
+ Config *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+ AdjustCPUShares bool
+ Platform string
+}
+
+// ContainerRmConfig holds arguments for the container remove
+// operation. This struct is used to tell the backend what operations
+// to perform.
+type ContainerRmConfig struct {
+ ForceRemove, RemoveVolume, RemoveLink bool
+}
+
+// ContainerCommitConfig contains build configs for commit operation,
+// and is used when making a commit with the current state of the container.
+type ContainerCommitConfig struct {
+ Pause bool
+ Repo string
+ Tag string
+ Author string
+ Comment string
+ // merge container config into commit config before commit
+ MergeConfigs bool
+ Config *container.Config
+}
+
+// ExecConfig is a small subset of the Config struct that holds the configuration
+// for the exec feature of docker.
+type ExecConfig struct {
+ User string // User that will run the command
+ Privileged bool // Is the container in privileged mode
+ Tty bool // Attach standard streams to a tty.
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStderr bool // Attach the standard error
+ AttachStdout bool // Attach the standard output
+ Detach bool // Execute in detach mode
+ DetachKeys string // Escape keys for detach
+ Env []string // Environment variables
+ Cmd []string // Execution commands and args
+}
+
+// PluginRmConfig holds arguments for plugin remove.
+type PluginRmConfig struct {
+ ForceRemove bool
+}
+
+// PluginEnableConfig holds arguments for plugin enable
+type PluginEnableConfig struct {
+ Timeout int
+}
+
+// PluginDisableConfig holds arguments for plugin disable.
+type PluginDisableConfig struct {
+ ForceDisable bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
new file mode 100644
index 000000000..55a03fc98
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/config.go
@@ -0,0 +1,69 @@
+package container
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+)
+
+// MinimumDuration puts a minimum on user configured duration.
+// This is to prevent API errors caused by time units. For example, the API
+// may set 3 as the healthcheck interval, intending 3 seconds, but
+// Docker would interpret it as 3 nanoseconds.
+const MinimumDuration = 1 * time.Millisecond
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
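+
+// A minimal sketch (illustrative, not part of the original file):
+//
+//	hc := &HealthConfig{
+//		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+//		Interval: 30 * time.Second, // well above MinimumDuration
+//		Timeout:  5 * time.Second,
+//		Retries:  3,
+//	}
+//	_ = hc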
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container, also supports user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined in the image's Dockerfile
+ Labels map[string]string // List of labels set on this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go
new file mode 100644
index 000000000..767945a53
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerChangeResponseItem container change response item
+// swagger:model ContainerChangeResponseItem
+type ContainerChangeResponseItem struct {
+
+ // Kind of change
+ // Required: true
+ Kind uint8 `json:"Kind"`
+
+ // Path to file that has changed
+ // Required: true
+ Path string `json:"Path"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go
new file mode 100644
index 000000000..c95023b81
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_create.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerCreateCreatedBody container create created body
+// swagger:model ContainerCreateCreatedBody
+type ContainerCreateCreatedBody struct {
+
+ // The ID of the created container
+ // Required: true
+ ID string `json:"Id"`
+
+ // Warnings encountered when creating the container
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
new file mode 100644
index 000000000..78bc37ee5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerTopOKBody container top o k body
+// swagger:model ContainerTopOKBody
+type ContainerTopOKBody struct {
+
+ // Each process running in the container, where each process is an array of values corresponding to the titles
+ // Required: true
+ Processes [][]string `json:"Processes"`
+
+ // The ps column titles
+ // Required: true
+ Titles []string `json:"Titles"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
new file mode 100644
index 000000000..2339366fb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -0,0 +1,17 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerUpdateOKBody container update o k body
+// swagger:model ContainerUpdateOKBody
+type ContainerUpdateOKBody struct {
+
+ // warnings
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go
new file mode 100644
index 000000000..77ecdbaf7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go
@@ -0,0 +1,17 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerWaitOKBody container wait o k body
+// swagger:model ContainerWaitOKBody
+type ContainerWaitOKBody struct {
+
+ // Exit code of the container
+ // Required: true
+ StatusCode int64 `json:"StatusCode"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
new file mode 100644
index 000000000..9fea9eb04
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -0,0 +1,380 @@
+package container
+
+import (
+ "strings"
+
+ "github.com/docker/docker/api/types/blkiodev"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+ "github.com/docker/go-units"
+)
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform specific
+type Isolation string
+
+// IsDefault indicates the default isolation technology of a container. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+ return strings.ToLower(string(i)) == "default" || string(i) == ""
+}
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its private ipc stack.
+func (n IpcMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's ipc stack.
+func (n IpcMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's ipc stack.
+func (n IpcMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the ipc stack is valid.
+func (n IpcMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose ipc stack is going to be used.
+func (n IpcMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
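+
+// Illustrative examples of the mode parsing above (not part of the original file):
+//
+//	IpcMode("").IsPrivate()              // true: neither host nor container
+//	IpcMode("host").IsHost()             // true
+//	IpcMode("container:web").Container() // "web"
+//	IpcMode("container:").Valid()        // false: empty container name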
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// IsNone indicates whether the container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsDefault indicates whether the container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == "default"
+}
+
+// IsPrivate indicates whether the container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsContainer indicates whether the container uses another container's network stack.
+func (n NetworkMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// ConnectedContainer is the id of the container whose network this container is connected to.
+func (n NetworkMode) ConnectedContainer() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// UserDefined returns the name of a user-created network, or an empty string.
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container's cgroup
+func (c CgroupSpec) IsContainer() bool {
+ parts := strings.SplitN(string(c), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+ return c.IsContainer() || c == ""
+}
+
+// Container returns the name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() string {
+ parts := strings.SplitN(string(c), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+ return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's pid namespace.
+func (n PidMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+ PathOnHost string
+ PathInContainer string
+ CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+ Name string
+ MaximumRetryCount int
+}
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+ return rp.Name == "no" || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+ return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart if it exits with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+ return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+ return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+ return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogMode is a type to define the available modes for logging.
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+ LogModeUnset = ""
+ LogModeBlocking LogMode = "blocking"
+ LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+ Type string
+ Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+ // Applicable to all platforms
+ CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+ Memory int64 // Memory limit (in bytes)
+ NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
+
+ // Applicable to UNIX platforms
+ CgroupParent string // Parent cgroup.
+ BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
+ BlkioWeightDevice []*blkiodev.WeightDevice
+ BlkioDeviceReadBps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
+ BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+ CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
+ CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
+ CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
+ CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+ CpusetCpus string // CpusetCpus 0-2, 0,1
+ CpusetMems string // CpusetMems 0-2, 0,1
+ Devices []DeviceMapping // List of devices to map inside the container
+ DeviceCgroupRules []string // List of rules to be added to the device cgroup
+ DiskQuota int64 // Disk limit (in bytes)
+ KernelMemory int64 // Kernel memory limit (in bytes)
+ MemoryReservation int64 // Memory soft limit (in bytes)
+ MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwappiness *int64 // Tuning container memory swappiness behaviour
+ OomKillDisable *bool // Whether to disable OOM Killer or not
+ PidsLimit int64 // Setting pids limit for a container
+ Ulimits []*units.Ulimit // List of ulimits to be set in the container
+
+ // Applicable to Windows
+ CPUCount int64 `json:"CpuCount"` // CPU count
+ CPUPercent int64 `json:"CpuPercent"` // CPU percent
+ IOMaximumIOps uint64 // Maximum IOps for the container system drive
+ IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+ RestartPolicy RestartPolicy
+}
+
+// HostConfig is the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent on the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+ // Applicable to all platforms
+ Binds []string // List of volume bindings for this container
+ ContainerIDFile string // File (path) where the containerId is written
+ LogConfig LogConfig // Configuration of the logs for this container
+ NetworkMode NetworkMode // Network mode to use for the container
+ PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
+ RestartPolicy RestartPolicy // Restart policy to be used for the container
+ AutoRemove bool // Automatically remove container when it exits
+ VolumeDriver string // Name of the volume driver used to mount volumes
+ VolumesFrom []string // List of volumes to take from other container
+
+ // Applicable to UNIX platforms
+ CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
+ CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
+ DNS []string `json:"Dns"` // List of DNS servers to look up
+ DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
+ DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
+ ExtraHosts []string // List of extra hosts
+ GroupAdd []string // List of additional groups that the container process will run as
+ IpcMode IpcMode // IPC namespace to use for the container
+ Cgroup CgroupSpec // Cgroup to use for the container
+ Links []string // List of links (in the name:alias form)
+ OomScoreAdj int // Container preference for OOM-killing
+ PidMode PidMode // PID namespace to use for the container
+ Privileged bool // Is the container in privileged mode
+ PublishAllPorts bool // Should docker publish all exposed ports for the container
+ ReadonlyRootfs bool // Is the container root filesystem read-only
+ SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+ UTSMode UTSMode // UTS namespace to use for the container
+ UsernsMode UsernsMode // The user namespace to use for the container
+ ShmSize int64 // Total shm memory usage
+ Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+ Runtime string `json:",omitempty"` // Runtime to use with this container
+
+ // Applicable to Windows
+ ConsoleSize [2]uint // Initial console size (height,width)
+ Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+
+ // Mounts specs used by the container
+ Mounts []mount.Mount `json:",omitempty"`
+
+ // Run a custom init inside the container, if null, use the daemon's configured settings
+ Init *bool `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
new file mode 100644
index 000000000..2d664d1c9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -0,0 +1,41 @@
+// +build !windows
+
+package container
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsBridge() {
+ return "bridge"
+ } else if n.IsHost() {
+ return "host"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsDefault() {
+ return "default"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+ return ""
+}
+
+// IsBridge indicates whether the container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+ return n == "bridge"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsUserDefined indicates whether the network is user-created
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
new file mode 100644
index 000000000..469923f7e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
@@ -0,0 +1,54 @@
+package container
+
+import (
+ "strings"
+)
+
+// IsBridge indicates whether the container uses the bridge network stack;
+// on Windows it is given the name NAT.
+func (n NetworkMode) IsBridge() bool {
+ return n == "nat"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+// It returns false, as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+ return false
+}
+
+// IsUserDefined indicates whether the network is user-created
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+ return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+ return strings.ToLower(string(i)) == "process"
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsDefault() {
+ return "default"
+ } else if n.IsBridge() {
+ return "nat"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
new file mode 100644
index 000000000..64820fe35
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
@@ -0,0 +1,22 @@
+package container
+
+// WaitCondition is a type used to specify a container state for which
+// to wait.
+type WaitCondition string
+
+// Possible WaitCondition Values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
+const (
+ WaitConditionNotRunning WaitCondition = "not-running"
+ WaitConditionNextExit WaitCondition = "next-exit"
+ WaitConditionRemoved WaitCondition = "removed"
+)
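+
+// With the Go client of this vintage, a WaitCondition is passed to
+// ContainerWait (a minimal sketch; assumes a client `cli` and container ID `id`):
+//
+//	bodyCh, errCh := cli.ContainerWait(ctx, id, container.WaitConditionNotRunning)
+//	select {
+//	case body := <-bodyCh:
+//		fmt.Println("exit code:", body.StatusCode)
+//	case err := <-errCh:
+//		fmt.Println("wait error:", err)
+//	}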
diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go
new file mode 100644
index 000000000..dc942d9d9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/error_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ErrorResponse Represents an error.
+// swagger:model ErrorResponse
+type ErrorResponse struct {
+
+ // The error message.
+ // Required: true
+ Message string `json:"message"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
new file mode 100644
index 000000000..e292565b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/events/events.go
@@ -0,0 +1,52 @@
+package events
+
+const (
+ // ContainerEventType is the event type that containers generate
+ ContainerEventType = "container"
+ // DaemonEventType is the event type that the daemon generates
+ DaemonEventType = "daemon"
+ // ImageEventType is the event type that images generate
+ ImageEventType = "image"
+ // NetworkEventType is the event type that networks generate
+ NetworkEventType = "network"
+ // PluginEventType is the event type that plugins generate
+ PluginEventType = "plugin"
+ // VolumeEventType is the event type that volumes generate
+ VolumeEventType = "volume"
+ // ServiceEventType is the event type that services generate
+ ServiceEventType = "service"
+ // NodeEventType is the event type that nodes generate
+ NodeEventType = "node"
+ // SecretEventType is the event type that secrets generate
+ SecretEventType = "secret"
+ // ConfigEventType is the event type that configs generate
+ ConfigEventType = "config"
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set of attributes.
+// The container attributes are its labels; other actors
+// can generate these attributes from other properties.
+type Actor struct {
+ ID string
+ Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+ // Deprecated information from JSONMessage.
+ // With data only in container events.
+ Status string `json:"status,omitempty"`
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+
+ Type string
+ Action string
+ Actor Actor
+ // Engine events are local scope. Cluster events are swarm scope.
+ Scope string `json:"scope,omitempty"`
+
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+}
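+
+// Messages are typically consumed through the Go client's Events call
+// (a minimal sketch; assumes a client `cli`):
+//
+//	msgCh, errCh := cli.Events(ctx, types.EventsOptions{})
+//	for {
+//		select {
+//		case m := <-msgCh:
+//			fmt.Println(m.Type, m.Action, m.Actor.ID)
+//		case err := <-errCh:
+//			fmt.Println("events error:", err)
+//			return
+//		}
+//	}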
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
new file mode 100644
index 000000000..beec3d494
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -0,0 +1,310 @@
+// Package filters provides helper functions to parse and handle command-line
+// filters, used for example in the docker ps or docker images commands.
+package filters
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/docker/docker/api/types/versions"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+ return Args{fields: map[string]map[string]bool{}}
+}
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// If prev map is provided, then it is appended to, and returned. By default a new
+// map is created.
+func ParseFlag(arg string, prev Args) (Args, error) {
+ filters := prev
+ if len(arg) == 0 {
+ return filters, nil
+ }
+
+ if !strings.Contains(arg, "=") {
+ return filters, ErrBadFormat
+ }
+
+ f := strings.SplitN(arg, "=", 2)
+
+ name := strings.ToLower(strings.TrimSpace(f[0]))
+ value := strings.TrimSpace(f[1])
+
+ filters.Add(name, value)
+
+ return filters, nil
+}
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
+
+// ToParam packs the Args into a string for easy transport from client to server.
+func ToParam(a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ buf, err := json.Marshal(a.fields)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// ToParamWithVersion packs the Args into a string for easy transport from client to server.
+// The generated string will depend on the specified version (corresponding to the API version).
+func ToParamWithVersion(version string, a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ // for daemons older than v1.10, filter must be of the form map[string][]string
+ var buf []byte
+ var err error
+ if version != "" && versions.LessThan(version, "1.22") {
+ buf, err = json.Marshal(convertArgsToSlice(a.fields))
+ } else {
+ buf, err = json.Marshal(a.fields)
+ }
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// FromParam unpacks the filter Args.
+func FromParam(p string) (Args, error) {
+ if len(p) == 0 {
+ return NewArgs(), nil
+ }
+
+ r := strings.NewReader(p)
+ d := json.NewDecoder(r)
+
+ m := map[string]map[string]bool{}
+ if err := d.Decode(&m); err != nil {
+ r.Seek(0, 0)
+
+ // Allow parsing old arguments in slice format.
+ // Because other libraries might be sending them in this format.
+ deprecated := map[string][]string{}
+ if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
+ m = deprecatedArgs(deprecated)
+ } else {
+ return NewArgs(), err
+ }
+ }
+ return Args{m}, nil
+}
+
+// Get returns the list of values associated with a field.
+// It returns a slice of strings to keep backwards compatibility with old code.
+func (filters Args) Get(field string) []string {
+ values := filters.fields[field]
+ if values == nil {
+ return make([]string, 0)
+ }
+ slice := make([]string, 0, len(values))
+ for key := range values {
+ slice = append(slice, key)
+ }
+ return slice
+}
+
+// Add adds a new value to a filter field.
+func (filters Args) Add(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ filters.fields[name][value] = true
+ } else {
+ filters.fields[name] = map[string]bool{value: true}
+ }
+}
+
+// Del removes a value from a filter field.
+func (filters Args) Del(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ delete(filters.fields[name], value)
+ if len(filters.fields[name]) == 0 {
+ delete(filters.fields, name)
+ }
+ }
+}
+
+// Len returns the number of fields in the arguments.
+func (filters Args) Len() int {
+ return len(filters.fields)
+}
+
+// MatchKVList returns true if the values for the specified field match the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}},
+// field is 'label' and sources are {'label1': '1', 'label2': '1'}
+// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+ fieldValues := filters.fields[field]
+
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if len(sources) == 0 {
+ return false
+ }
+
+ for name2match := range fieldValues {
+ testKV := strings.SplitN(name2match, "=", 2)
+
+ v, ok := sources[testKV[0]]
+ if !ok {
+ return false
+ }
+ if len(testKV) == 2 && testKV[1] != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if the values for the specified field match the source string
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+ fieldValues, ok := filters.fields[field]
+	// do not filter if there is no filter set or the filter cannot be determined
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is exactly one filter value for the
+// field and the source matches it, or if no filter is set for the field.
+func (filters Args) UniqueExactMatch(field, source string) bool {
+ fieldValues := filters.fields[field]
+	// do not filter if there is no filter set or the filter cannot be determined
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(filters.fields[field]) != 1 {
+ return false
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Include returns true if the field is present in the filters.
+func (filters Args) Include(field string) bool {
+ _, ok := filters.fields[field]
+ return ok
+}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
+func (filters Args) Validate(accepted map[string]bool) error {
+ for name := range filters.fields {
+ if !accepted[name] {
+ return fmt.Errorf("Invalid filter '%s'", name)
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of filtered values for a field.
+// It stops the iteration and returns the error as soon as op returns one.
+func (filters Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := filters.fields[field]; !ok {
+ return nil
+ }
+ for v := range filters.fields[field] {
+ if err := op(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+ m := map[string]map[string]bool{}
+ for k, v := range d {
+ values := map[string]bool{}
+ for _, vv := range v {
+ values[vv] = true
+ }
+ m[k] = values
+ }
+ return m
+}
+
+func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
+ m := map[string][]string{}
+ for k, v := range f {
+ values := []string{}
+ for kk := range v {
+ if v[kk] {
+ values = append(values, kk)
+ }
+ }
+ m[k] = values
+ }
+ return m
+}
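
A minimal usage sketch for the filter helpers above (not part of the vendored diff; it assumes the package's usual import path). It builds an Args set, serializes it for a pre-1.22 API daemon, and parses it back:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("label", "env=prod")
	args.Add("status", "running")

	// API versions below 1.22 get the legacy map[string][]string encoding.
	param, err := filters.ToParamWithVersion("1.21", args)
	if err != nil {
		panic(err)
	}
	fmt.Println(param) // {"label":["env=prod"],"status":["running"]}

	// FromParam falls back to the deprecated slice format automatically.
	parsed, err := filters.FromParam(param)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Match("status", "running")) // true
}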
diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
new file mode 100644
index 000000000..4d9bf1c62
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
@@ -0,0 +1,17 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// GraphDriverData Information about a container's graph driver.
+// swagger:model GraphDriverData
+type GraphDriverData struct {
+
+ // data
+ // Required: true
+ Data map[string]string `json:"Data"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go
new file mode 100644
index 000000000..7592d2f8b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/id_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// IDResponse Response to an API call that returns just an Id
+// swagger:model IdResponse
+type IDResponse struct {
+
+ // The id of the newly created object.
+ // Required: true
+ ID string `json:"Id"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go
new file mode 100644
index 000000000..0dd30c729
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/image_history.go
@@ -0,0 +1,37 @@
+package image
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// HistoryResponseItem history response item
+// swagger:model HistoryResponseItem
+type HistoryResponseItem struct {
+
+ // comment
+ // Required: true
+ Comment string `json:"Comment"`
+
+ // created
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // created by
+ // Required: true
+ CreatedBy string `json:"CreatedBy"`
+
+ // Id
+ // Required: true
+ ID string `json:"Id"`
+
+ // size
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // tags
+ // Required: true
+ Tags []string `json:"Tags"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
new file mode 100644
index 000000000..b9a65a0d8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
@@ -0,0 +1,15 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageDeleteResponseItem image delete response item
+// swagger:model ImageDeleteResponseItem
+type ImageDeleteResponseItem struct {
+
+ // The image ID of an image that was deleted
+ Deleted string `json:"Deleted,omitempty"`
+
+ // The image ID of an image that was untagged
+ Untagged string `json:"Untagged,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go
new file mode 100644
index 000000000..e145b3dcf
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image_summary.go
@@ -0,0 +1,49 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageSummary image summary
+// swagger:model ImageSummary
+type ImageSummary struct {
+
+ // containers
+ // Required: true
+ Containers int64 `json:"Containers"`
+
+ // created
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // Id
+ // Required: true
+ ID string `json:"Id"`
+
+ // labels
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // parent Id
+ // Required: true
+ ParentID string `json:"ParentId"`
+
+ // repo digests
+ // Required: true
+ RepoDigests []string `json:"RepoDigests"`
+
+ // repo tags
+ // Required: true
+ RepoTags []string `json:"RepoTags"`
+
+ // shared size
+ // Required: true
+ SharedSize int64 `json:"SharedSize"`
+
+ // size
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // virtual size
+ // Required: true
+ VirtualSize int64 `json:"VirtualSize"`
+}
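
As a hedged sketch of how this model is typically consumed, the JSON body of a GET /images/json response can be decoded straight into a slice of these summaries (the payload below is illustrative and truncated, not taken from the diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Illustrative payload in the shape of GET /images/json.
	payload := `[{"Id":"sha256:abc","ParentId":"","RepoTags":["ubuntu:16.04"],"RepoDigests":[],"Created":1509465600,"Size":120000000,"SharedSize":0,"VirtualSize":120000000,"Labels":{},"Containers":2}]`

	var images []types.ImageSummary
	if err := json.Unmarshal([]byte(payload), &images); err != nil {
		panic(err)
	}
	fmt.Println(images[0].RepoTags[0], images[0].Size) // ubuntu:16.04 120000000
}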
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
new file mode 100644
index 000000000..2744f85d6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -0,0 +1,128 @@
+package mount
+
+import (
+ "os"
+)
+
+// Type represents the type of a mount.
+type Type string
+
+// Type constants
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind Type = "bind"
+ // TypeVolume is the type for remote storage volumes
+ TypeVolume Type = "volume"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs Type = "tmpfs"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+ Type Type `json:",omitempty"`
+ // Source specifies the name of the mount. Depending on mount type, this
+ // may be a volume name or a host path, or even ignored.
+ // Source is not supported for tmpfs (must be an empty value)
+ Source string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Consistency Consistency `json:",omitempty"`
+
+ BindOptions *BindOptions `json:",omitempty"`
+ VolumeOptions *VolumeOptions `json:",omitempty"`
+ TmpfsOptions *TmpfsOptions `json:",omitempty"`
+}
+
+// Propagation represents the propagation of a mount.
+type Propagation string
+
+const (
+ // PropagationRPrivate RPRIVATE
+ PropagationRPrivate Propagation = "rprivate"
+ // PropagationPrivate PRIVATE
+ PropagationPrivate Propagation = "private"
+ // PropagationRShared RSHARED
+ PropagationRShared Propagation = "rshared"
+ // PropagationShared SHARED
+ PropagationShared Propagation = "shared"
+ // PropagationRSlave RSLAVE
+ PropagationRSlave Propagation = "rslave"
+ // PropagationSlave SLAVE
+ PropagationSlave Propagation = "slave"
+)
+
+// Propagations is the list of all valid mount propagations
+var Propagations = []Propagation{
+ PropagationRPrivate,
+ PropagationPrivate,
+ PropagationRShared,
+ PropagationShared,
+ PropagationRSlave,
+ PropagationSlave,
+}
+
+// Consistency represents the consistency requirements of a mount.
+type Consistency string
+
+const (
+ // ConsistencyFull guarantees bind-mount-like consistency
+ ConsistencyFull Consistency = "consistent"
+ // ConsistencyCached mounts can cache read data and FS structure
+ ConsistencyCached Consistency = "cached"
+ // ConsistencyDelegated mounts can cache read and written data and structure
+ ConsistencyDelegated Consistency = "delegated"
+ // ConsistencyDefault provides "consistent" behavior unless overridden
+ ConsistencyDefault Consistency = "default"
+)
+
+// BindOptions defines options specific to mounts of type "bind".
+type BindOptions struct {
+ Propagation Propagation `json:",omitempty"`
+}
+
+// VolumeOptions represents the options for a mount of type volume.
+type VolumeOptions struct {
+ NoCopy bool `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ DriverConfig *Driver `json:",omitempty"`
+}
+
+// Driver represents a volume driver.
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TmpfsOptions defines options specific to mounts of type "tmpfs".
+type TmpfsOptions struct {
+ // Size sets the size of the tmpfs, in bytes.
+ //
+ // This will be converted to an operating system specific value
+	// depending on the host. For example, on Linux, it will be converted to
+	// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
+	// Docker, uses a straight byte value.
+ //
+ // Percentages are not supported.
+ SizeBytes int64 `json:",omitempty"`
+ // Mode of the tmpfs upon creation
+ Mode os.FileMode `json:",omitempty"`
+
+ // TODO(stevvooe): There are several more tmpfs flags, specified in the
+ // daemon, that are accepted. Only the most basic are added for now.
+ //
+ // From docker/docker/pkg/mount/flags.go:
+ //
+ // var validFlags = map[string]bool{
+ // "": true,
+ // "size": true, X
+ // "mode": true, X
+ // "uid": true,
+ // "gid": true,
+ // "nr_inodes": true,
+ // "nr_blocks": true,
+ // "mpol": true,
+ // }
+ //
+ // Some of these may be straightforward to add, but others, such as
+ // uid/gid have implications in a clustered system.
+}
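
A short sketch (values are illustrative, not from the diff) of how these types compose into a mount list, one bind mount and one tmpfs:

package main

import "github.com/docker/docker/api/types/mount"

func main() {
	mounts := []mount.Mount{
		{
			Type:     mount.TypeBind,
			Source:   "/var/lib/app", // host path for bind mounts
			Target:   "/data",
			ReadOnly: true,
			BindOptions: &mount.BindOptions{
				Propagation: mount.PropagationRPrivate,
			},
		},
		{
			Type:   mount.TypeTmpfs, // Source must stay empty for tmpfs
			Target: "/scratch",
			TmpfsOptions: &mount.TmpfsOptions{
				SizeBytes: 64 << 20, // 64 MiB
				Mode:      0700,
			},
		},
	}
	_ = mounts // passed along in a container's host configuration in real use
}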
diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go
new file mode 100644
index 000000000..7c7dbacc8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/network/network.go
@@ -0,0 +1,108 @@
+package network
+
+// Address represents an IP address
+type Address struct {
+ Addr string
+ PrefixLen int
+}
+
+// IPAM represents IP Address Management
+type IPAM struct {
+ Driver string
+	Options map[string]string // Per network IPAM driver options
+ Config []IPAMConfig
+}
+
+// IPAMConfig represents IPAM configurations
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ IPRange string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+ AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// EndpointIPAMConfig represents IPAM configurations for the endpoint
+type EndpointIPAMConfig struct {
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+ LinkLocalIPs []string `json:",omitempty"`
+}
+
+// Copy makes a copy of the endpoint ipam config
+func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
+ cfgCopy := *cfg
+ cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
+ cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
+ return &cfgCopy
+}
+
+// PeerInfo represents one peer of an overlay network
+type PeerInfo struct {
+ Name string
+ IP string
+}
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+ // Configurations
+ IPAMConfig *EndpointIPAMConfig
+ Links []string
+ Aliases []string
+ // Operational data
+ NetworkID string
+ EndpointID string
+ Gateway string
+ IPAddress string
+ IPPrefixLen int
+ IPv6Gateway string
+ GlobalIPv6Address string
+ GlobalIPv6PrefixLen int
+ MacAddress string
+ DriverOpts map[string]string
+}
+
+// Task carries the information about one backend task
+type Task struct {
+ Name string
+ EndpointID string
+ EndpointIP string
+ Info map[string]string
+}
+
+// ServiceInfo represents service parameters with the list of service's tasks
+type ServiceInfo struct {
+ VIP string
+ Ports []string
+ LocalLBIndex int
+ Tasks []Task
+}
+
+// Copy makes a deep copy of `EndpointSettings`
+func (es *EndpointSettings) Copy() *EndpointSettings {
+ epCopy := *es
+ if es.IPAMConfig != nil {
+ epCopy.IPAMConfig = es.IPAMConfig.Copy()
+ }
+
+ if es.Links != nil {
+ links := make([]string, 0, len(es.Links))
+ epCopy.Links = append(links, es.Links...)
+ }
+
+ if es.Aliases != nil {
+ aliases := make([]string, 0, len(es.Aliases))
+ epCopy.Aliases = append(aliases, es.Aliases...)
+ }
+ return &epCopy
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// It carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
+}
+
+// ConfigReference specifies the source which provides a network's configuration
+type ConfigReference struct {
+ Network string
+}
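
The Copy methods above exist so callers can mutate a copy without aliasing the original's slices and pointers; a minimal sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	orig := &network.EndpointSettings{
		NetworkID:  "abc123",
		Aliases:    []string{"web"},
		IPAMConfig: &network.EndpointIPAMConfig{IPv4Address: "172.18.0.10"},
	}

	cp := orig.Copy()
	cp.Aliases[0] = "api"                     // new backing array, orig untouched
	cp.IPAMConfig.IPv4Address = "172.18.0.99" // IPAMConfig was deep-copied too

	fmt.Println(orig.Aliases[0], orig.IPAMConfig.IPv4Address) // web 172.18.0.10
}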
diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go
new file mode 100644
index 000000000..cab333e01
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin.go
@@ -0,0 +1,200 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Plugin A plugin for the Engine API
+// swagger:model Plugin
+type Plugin struct {
+
+ // config
+ // Required: true
+ Config PluginConfig `json:"Config"`
+
+ // True if the plugin is running. False if the plugin is not running, only installed.
+ // Required: true
+ Enabled bool `json:"Enabled"`
+
+ // Id
+ ID string `json:"Id,omitempty"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // plugin remote reference used to push/pull the plugin
+ PluginReference string `json:"PluginReference,omitempty"`
+
+ // settings
+ // Required: true
+ Settings PluginSettings `json:"Settings"`
+}
+
+// PluginConfig The config of a plugin.
+// swagger:model PluginConfig
+type PluginConfig struct {
+
+ // args
+ // Required: true
+ Args PluginConfigArgs `json:"Args"`
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // Docker Version used to create the plugin
+ DockerVersion string `json:"DockerVersion,omitempty"`
+
+ // documentation
+ // Required: true
+ Documentation string `json:"Documentation"`
+
+ // entrypoint
+ // Required: true
+ Entrypoint []string `json:"Entrypoint"`
+
+ // env
+ // Required: true
+ Env []PluginEnv `json:"Env"`
+
+ // interface
+ // Required: true
+ Interface PluginConfigInterface `json:"Interface"`
+
+ // ipc host
+ // Required: true
+ IpcHost bool `json:"IpcHost"`
+
+ // linux
+ // Required: true
+ Linux PluginConfigLinux `json:"Linux"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+
+ // network
+ // Required: true
+ Network PluginConfigNetwork `json:"Network"`
+
+ // pid host
+ // Required: true
+ PidHost bool `json:"PidHost"`
+
+ // propagated mount
+ // Required: true
+ PropagatedMount string `json:"PropagatedMount"`
+
+ // user
+ User PluginConfigUser `json:"User,omitempty"`
+
+ // work dir
+ // Required: true
+ WorkDir string `json:"WorkDir"`
+
+ // rootfs
+ Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
+}
+
+// PluginConfigArgs plugin config args
+// swagger:model PluginConfigArgs
+type PluginConfigArgs struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value []string `json:"Value"`
+}
+
+// PluginConfigInterface The interface between Docker and the plugin
+// swagger:model PluginConfigInterface
+type PluginConfigInterface struct {
+
+ // socket
+ // Required: true
+ Socket string `json:"Socket"`
+
+ // types
+ // Required: true
+ Types []PluginInterfaceType `json:"Types"`
+}
+
+// PluginConfigLinux plugin config linux
+// swagger:model PluginConfigLinux
+type PluginConfigLinux struct {
+
+ // allow all devices
+ // Required: true
+ AllowAllDevices bool `json:"AllowAllDevices"`
+
+ // capabilities
+ // Required: true
+ Capabilities []string `json:"Capabilities"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+}
+
+// PluginConfigNetwork plugin config network
+// swagger:model PluginConfigNetwork
+type PluginConfigNetwork struct {
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
+
+// PluginConfigRootfs plugin config rootfs
+// swagger:model PluginConfigRootfs
+type PluginConfigRootfs struct {
+
+ // diff ids
+ DiffIds []string `json:"diff_ids"`
+
+ // type
+ Type string `json:"type,omitempty"`
+}
+
+// PluginConfigUser plugin config user
+// swagger:model PluginConfigUser
+type PluginConfigUser struct {
+
+ // g ID
+ GID uint32 `json:"GID,omitempty"`
+
+ // UID
+ UID uint32 `json:"UID,omitempty"`
+}
+
+// PluginSettings Settings that can be modified by users.
+// swagger:model PluginSettings
+type PluginSettings struct {
+
+ // args
+ // Required: true
+ Args []string `json:"Args"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+
+ // env
+ // Required: true
+ Env []string `json:"Env"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go
new file mode 100644
index 000000000..569901067
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_device.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginDevice plugin device
+// swagger:model PluginDevice
+type PluginDevice struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // path
+ // Required: true
+ Path *string `json:"Path"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go
new file mode 100644
index 000000000..32962dc2e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_env.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginEnv plugin env
+// swagger:model PluginEnv
+type PluginEnv struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value *string `json:"Value"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
new file mode 100644
index 000000000..c82f204e8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
@@ -0,0 +1,21 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginInterfaceType plugin interface type
+// swagger:model PluginInterfaceType
+type PluginInterfaceType struct {
+
+ // capability
+ // Required: true
+ Capability string `json:"Capability"`
+
+ // prefix
+ // Required: true
+ Prefix string `json:"Prefix"`
+
+ // version
+ // Required: true
+ Version string `json:"Version"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go
new file mode 100644
index 000000000..5c031cf8b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go
@@ -0,0 +1,37 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginMount plugin mount
+// swagger:model PluginMount
+type PluginMount struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // destination
+ // Required: true
+ Destination string `json:"Destination"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // options
+ // Required: true
+ Options []string `json:"Options"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // source
+ // Required: true
+ Source *string `json:"Source"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go
new file mode 100644
index 000000000..18f743fcd
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go
@@ -0,0 +1,71 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// PluginsListResponse contains the response for the Engine API
+type PluginsListResponse []*Plugin
+
+// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
+func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
+ versionIndex := len(p)
+ prefixIndex := 0
+ if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
+ return fmt.Errorf("%q is not a plugin interface type", p)
+ }
+ p = p[1 : len(p)-1]
+loop:
+ for i, b := range p {
+ switch b {
+ case '.':
+ prefixIndex = i
+ case '/':
+ versionIndex = i
+ break loop
+ }
+ }
+ t.Prefix = string(p[:prefixIndex])
+ t.Capability = string(p[prefixIndex+1 : versionIndex])
+ if versionIndex < len(p) {
+ t.Version = string(p[versionIndex+1:])
+ }
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler for PluginInterfaceType
+func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String implements fmt.Stringer for PluginInterfaceType
+func (t PluginInterfaceType) String() string {
+ return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string
+ Description string
+ Value []string
+}
+
+// PluginPrivileges is a list of PluginPrivilege
+type PluginPrivileges []PluginPrivilege
+
+func (s PluginPrivileges) Len() int {
+ return len(s)
+}
+
+func (s PluginPrivileges) Less(i, j int) bool {
+ return s[i].Name < s[j].Name
+}
+
+func (s PluginPrivileges) Swap(i, j int) {
+ sort.Strings(s[i].Value)
+ sort.Strings(s[j].Value)
+ s[i], s[j] = s[j], s[i]
+}
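
UnmarshalJSON above parses the compact `prefix.capability/version` string form; a round-trip sketch (the interface string is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	var t types.PluginInterfaceType
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0

	out, err := json.Marshal(&t) // MarshalJSON is on the pointer receiver
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "docker.volumedriver/1.0"
}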
diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go
new file mode 100644
index 000000000..ad52d46d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/port.go
@@ -0,0 +1,23 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Port An open port on a container
+// swagger:model Port
+type Port struct {
+
+ // IP
+ IP string `json:"IP,omitempty"`
+
+ // Port on the container
+ // Required: true
+ PrivatePort uint16 `json:"PrivatePort"`
+
+ // Port exposed on the host
+ PublicPort uint16 `json:"PublicPort,omitempty"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go
new file mode 100644
index 000000000..42cac4430
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go
@@ -0,0 +1,21 @@
+package registry
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// AuthenticateOKBody authenticate o k body
+// swagger:model AuthenticateOKBody
+type AuthenticateOKBody struct {
+
+ // An opaque token used to authenticate a user after a successful login
+ // Required: true
+ IdentityToken string `json:"IdentityToken"`
+
+ // The status of the authentication
+ // Required: true
+ Status string `json:"Status"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
new file mode 100644
index 000000000..b98a943a1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -0,0 +1,119 @@
+package registry
+
+import (
+ "encoding/json"
+ "net"
+
+ "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ServiceConfig stores daemon registry services configuration.
+type ServiceConfig struct {
+ AllowNondistributableArtifactsCIDRs []*NetIPNet
+ AllowNondistributableArtifactsHostnames []string
+ InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
+ IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
+ Mirrors []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled to and
+// unmarshalled from JSON
+type NetIPNet net.IPNet
+
+// String returns the CIDR notation of ipnet
+func (ipnet *NetIPNet) String() string {
+ return (*net.IPNet)(ipnet).String()
+}
+
+// MarshalJSON returns the JSON representation of the IPNet
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+ var ipnetStr string
+ if err = json.Unmarshal(b, &ipnetStr); err == nil {
+ var cidr *net.IPNet
+ if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+ *ipnet = NetIPNet(*cidr)
+ }
+ }
+ return
+}
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+// {
+// "Index" : {
+// "Name" : "docker.io",
+// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+// "Secure" : true,
+// "Official" : true,
+// },
+// "RemoteName" : "library/debian",
+// "LocalName" : "debian",
+// "CanonicalName" : "docker.io/debian"
+// "Official" : true,
+// }
+//
+// {
+// "Index" : {
+// "Name" : "127.0.0.1:5000",
+// "Mirrors" : [],
+// "Secure" : false,
+// "Official" : false,
+// },
+// "RemoteName" : "user/repo",
+// "LocalName" : "127.0.0.1:5000/user/repo",
+// "CanonicalName" : "127.0.0.1:5000/user/repo",
+// "Official" : false,
+// }
+type IndexInfo struct {
+ // Name is the name of the registry, such as "docker.io"
+ Name string
+ // Mirrors is a list of mirrors, expressed as URIs
+ Mirrors []string
+ // Secure is set to false if the registry is part of the list of
+ // insecure registries. Insecure registries accept HTTP and/or accept
+ // HTTPS with certificates from unknown CAs.
+ Secure bool
+ // Official indicates whether this is an official registry
+ Official bool
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+ // StarCount indicates the number of stars this repository has
+ StarCount int `json:"star_count"`
+ // IsOfficial is true if the result is from an official repository.
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor v1.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []v1.Platform
+}
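
NetIPNet exists purely so CIDR values survive JSON round-trips as plain strings; a minimal sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	fmt.Println(n.String()) // 10.0.0.0/8

	out, err := json.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "10.0.0.0/8"
}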
diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go
new file mode 100644
index 000000000..7d62c9a43
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/seccomp.go
@@ -0,0 +1,93 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ // Architectures is kept to maintain backward compatibility with the old
+ // seccomp profile.
+ Architectures []Arch `json:"architectures,omitempty"`
+ ArchMap []Architecture `json:"archMap,omitempty"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Architecture is used to represent a specific architecture
+// and its sub-architectures
+type Architecture struct {
+ Arch Arch `json:"architecture"`
+ SubArches []Arch `json:"subArchitectures"`
+}
+
+// Arch used for architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+ ActKill Action = "SCMP_ACT_KILL"
+ ActTrap Action = "SCMP_ACT_TRAP"
+ ActErrno Action = "SCMP_ACT_ERRNO"
+ ActTrace Action = "SCMP_ACT_TRACE"
+ ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual Operator = "SCMP_CMP_NE"
+ OpLessThan Operator = "SCMP_CMP_LT"
+ OpLessEqual Operator = "SCMP_CMP_LE"
+ OpEqualTo Operator = "SCMP_CMP_EQ"
+ OpGreaterEqual Operator = "SCMP_CMP_GE"
+ OpGreaterThan Operator = "SCMP_CMP_GT"
+ OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo"`
+ Op Operator `json:"op"`
+}
+
+// Filter is used to conditionally apply Seccomp rules
+type Filter struct {
+ Caps []string `json:"caps,omitempty"`
+ Arches []string `json:"arches,omitempty"`
+}
+
+// Syscall is used to match a group of syscalls in Seccomp
+type Syscall struct {
+ Name string `json:"name,omitempty"`
+ Names []string `json:"names,omitempty"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+ Comment string `json:"comment"`
+ Includes Filter `json:"includes"`
+ Excludes Filter `json:"excludes"`
+}
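
A sketch of a minimal whitelist-style profile built from these types (the syscall selection is illustrative, not a usable security policy):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	profile := types.Seccomp{
		DefaultAction: types.ActErrno, // deny everything by default
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX32},
		Syscalls: []*types.Syscall{{
			Names:  []string{"read", "write", "exit_group"},
			Action: types.ActAllow,
		}},
	}

	buf, err := json.MarshalIndent(profile, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // roughly the shape of the JSON profiles the daemon consumes
}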
diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go
new file mode 100644
index 000000000..74ea64b1b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+ // Optional warning messages
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go
new file mode 100644
index 000000000..7ca76a5b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types holds the stable API types and responses returned to
+// consumers of the API stats endpoint.
+package types
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds (Linux)
+ // Units: 100's of nanoseconds (Windows)
+ TotalUsage uint64 `json:"total_usage"`
+
+ // Total CPU time consumed per core (Linux). Not used on Windows.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+ // Time spent by tasks of the cgroup in kernel mode (Linux).
+ // Time spent by all container processes in kernel mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+ // Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of container
+type CPUStats struct {
+ // CPU Usage. Linux and Windows.
+ CPUUsage CPUUsage `json:"cpu_usage"`
+
+ // System Usage. Linux only.
+ SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+ // Online CPUs. Linux only.
+ OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+ // Throttling Data. Linux only.
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+ // Linux Memory Stats
+
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats,omitempty"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty"`
+
+ // Windows Memory Stats
+ // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+ // committed bytes
+ Commit uint64 `json:"commitbytes,omitempty"`
+ // peak committed bytes
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+ // private working set
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux specific structure, as the differences between expressing
+// block I/O on Windows and Linux are significant enough that attempting to
+// morph them into a combined structure makes little sense.
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+ // Bytes received. Windows and Linux.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Packets received. Windows and Linux.
+ RxPackets uint64 `json:"rx_packets"`
+	// Received errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the top-level struct aggregating all types of stats for one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container's name and ID, plus the
+// per-interface network stats introduced in API version 1.21
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
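
The Read/PreRead and CPUStats/PreCPUStats pairs exist so consumers can compute rates between two samples. A sketch of the delta calculation commonly used to derive a Linux CPU percentage; this helper is not part of the package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// cpuPercent derives a CPU usage percentage from one stats sample by
// comparing the current counters against the "pre" (previous) counters.
func cpuPercent(s *types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	cores := float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	if s.CPUStats.OnlineCPUs > 0 { // preferred when the daemon reports it
		cores = float64(s.CPUStats.OnlineCPUs)
	}
	return cpuDelta / sysDelta * cores * 100.0
}

func main() {
	fmt.Println(cpuPercent(&types.StatsJSON{})) // 0 for an empty sample
}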
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
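
The effect of the dual decoder: both the string form and the array form land in the same slice type. Note the string form is kept as a single element, not word-split:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	var a, b strslice.StrSlice
	if err := json.Unmarshal([]byte(`"echo hello"`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`["echo","hello"]`), &b); err != nil {
		panic(err)
	}
	fmt.Println(len(a), a) // 1 [echo hello]  (one element, not word-split)
	fmt.Println(len(b), b) // 2 [echo hello]
}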
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 000000000..2834cf202
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuer is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 000000000..0fb021ce9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,31 @@
+package swarm
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+ File *ConfigReferenceFileTarget
+ ConfigID string
+ ConfigName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
new file mode 100644
index 000000000..6f8b45f6b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -0,0 +1,72 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+)
+
+// DNSConfig specifies DNS related configurations in the resolver configuration file (resolv.conf)
+// Detailed documentation is available in:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+ // Nameservers specifies the IP addresses of the name servers
+ Nameservers []string `json:",omitempty"`
+ // Search specifies the search list for host-name lookup
+ Search []string `json:",omitempty"`
+ // Options allows certain internal resolver variables to be modified
+ Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+ Disable bool
+
+ User string
+ Role string
+ Type string
+ Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+ File string
+ Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+ CredentialSpec *CredentialSpec
+ SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Hostname string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ Privileges *Privileges `json:",omitempty"`
+ StopSignal string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ OpenStdin bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+ Healthcheck *container.HealthConfig `json:",omitempty"`
+ // The format of extra hosts on swarmkit is specified in:
+ // http://man7.org/linux/man-pages/man5/hosts.5.html
+ // IP_address canonical_hostname [aliases...]
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+}
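
A hedged sketch of a populated spec; the image, host entry, and DNS values are illustrative:

package main

import (
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	grace := 10 * time.Second
	spec := swarm.ContainerSpec{
		Image: "nginx:alpine",
		Env:   []string{"TZ=UTC"},
		// Hosts entries follow hosts(5): IP_address canonical_hostname
		Hosts: []string{"192.0.2.10 backend"},
		DNSConfig: &swarm.DNSConfig{
			Nameservers: []string{"10.0.0.2"},
			Search:      []string{"example.internal"},
		},
		StopGracePeriod: &grace,
	}
	_ = spec // embedded in a TaskSpec when creating a service
}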
diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go
new file mode 100644
index 000000000..97c484e14
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/network.go
@@ -0,0 +1,119 @@
+package swarm
+
+import (
+ "github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ // TargetPort is the port inside the container
+ TargetPort uint32 `json:",omitempty"`
+ // PublishedPort is the port on the swarm hosts
+ PublishedPort uint32 `json:",omitempty"`
+ // PublishMode is the mode in which port is published
+ PublishMode PortConfigPublishMode `json:",omitempty"`
+}
+
+// PortConfigPublishMode represents the mode in which the port is to
+// be published.
+type PortConfigPublishMode string
+
+const (
+ // PortConfigPublishModeIngress is used for ports published
+ // for ingress load balancing using routing mesh.
+ PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
+ // PortConfigPublishModeHost is used for ports published
+ // for direct host level access on the host where the task is running.
+ PortConfigPublishModeHost PortConfigPublishMode = "host"
+)
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+)
+
+// EndpointVirtualIP represents the virtual ip of a port.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+ Annotations
+ DriverConfiguration *Driver `json:",omitempty"`
+ IPv6Enabled bool `json:",omitempty"`
+ Internal bool `json:",omitempty"`
+ Attachable bool `json:",omitempty"`
+ Ingress bool `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+ ConfigFrom *network.ConfigReference `json:",omitempty"`
+ Scope string `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+ DriverOpts map[string]string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents ipam options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents ipam configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
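
A sketch tying the endpoint types together: publish container port 80 as 8080 through the routing mesh (values illustrative):

package main

import "github.com/docker/docker/api/types/swarm"

func main() {
	spec := swarm.EndpointSpec{
		Mode: swarm.ResolutionModeVIP, // virtual-IP based resolution
		Ports: []swarm.PortConfig{{
			Protocol:      swarm.PortConfigProtocolTCP,
			TargetPort:    80,   // port inside the container
			PublishedPort: 8080, // port on the swarm hosts
			PublishMode:   swarm.PortConfigPublishModeIngress,
		}},
	}
	_ = spec // attached to a service's endpoint spec in real use
}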
diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go
new file mode 100644
index 000000000..28c6851e9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/node.go
@@ -0,0 +1,115 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+ // Spec defines the desired state of the node as specified by the user.
+ // The system will honor this and will *never* modify it.
+ Spec NodeSpec `json:",omitempty"`
+ // Description encapsulates the properties of the Node as reported by the
+ // agent.
+ Description NodeDescription `json:",omitempty"`
+ // Status provides the current status of the node, as seen by the manager.
+ Status NodeStatus `json:",omitempty"`
+ // ManagerStatus provides the current status of the node's manager
+ // component, if the node is a manager.
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+ TLSInfo TLSInfo `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go
new file mode 100644
index 000000000..c4c731dc8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go
@@ -0,0 +1,19 @@
+package swarm
+
+// RuntimeType is the type of runtime used for the TaskSpec
+type RuntimeType string
+
+// RuntimeURL is the proto type url
+type RuntimeURL string
+
+const (
+ // RuntimeContainer is the container based runtime
+ RuntimeContainer RuntimeType = "container"
+ // RuntimePlugin is the plugin based runtime
+ RuntimePlugin RuntimeType = "plugin"
+
+ // RuntimeURLContainer is the proto url for the container type
+ RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
+ // RuntimeURLPlugin is the proto url for the plugin type
+ RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
+)
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
new file mode 100644
index 000000000..47ae234ef
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
@@ -0,0 +1,3 @@
+//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
+
+package runtime
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
new file mode 100644
index 000000000..1fdc9b043
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
@@ -0,0 +1,712 @@
+// Code generated by protoc-gen-gogo.
+// source: plugin.proto
+// DO NOT EDIT!
+
+/*
+ Package runtime is a generated protocol buffer package.
+
+ It is generated from these files:
+ plugin.proto
+
+ It has these top-level messages:
+ PluginSpec
+ PluginPrivilege
+*/
+package runtime
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+type PluginSpec struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
+ Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
+ Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
+}
+
+func (m *PluginSpec) Reset() { *m = PluginSpec{} }
+func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
+func (*PluginSpec) ProtoMessage() {}
+func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
+
+func (m *PluginSpec) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetRemote() string {
+ if m != nil {
+ return m.Remote
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
+ if m != nil {
+ return m.Privileges
+ }
+ return nil
+}
+
+func (m *PluginSpec) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
+}
+
+func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
+func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
+func (*PluginPrivilege) ProtoMessage() {}
+func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
+
+func (m *PluginPrivilege) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
+ proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
+}
+func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Remote) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
+ i += copy(dAtA[i:], m.Remote)
+ }
+ if len(m.Privileges) > 0 {
+ for _, msg := range m.Privileges {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.Disabled {
+ dAtA[i] = 0x20
+ i++
+ if m.Disabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Description) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
+ i += copy(dAtA[i:], m.Description)
+ }
+ if len(m.Value) > 0 {
+ for _, s := range m.Value {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ dAtA[offset+4] = uint8(v >> 32)
+ dAtA[offset+5] = uint8(v >> 40)
+ dAtA[offset+6] = uint8(v >> 48)
+ dAtA[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *PluginSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Remote)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Privileges) > 0 {
+ for _, e := range m.Privileges {
+ l = e.Size()
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ if m.Disabled {
+ n += 2
+ }
+ return n
+}
+
+func (m *PluginPrivilege) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Description)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Value) > 0 {
+ for _, s := range m.Value {
+ l = len(s)
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovPlugin(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozPlugin(x uint64) (n int) {
+ return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *PluginSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Remote = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Privileges = append(m.Privileges, &PluginPrivilege{})
+ if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Disabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipPlugin(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthPlugin
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipPlugin(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
+
+var fileDescriptorPlugin = []byte{
+ // 196 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
+ 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b,
+ 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30,
+ 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12,
+ 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35,
+ 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c,
+ 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a,
+ 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab,
+ 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
+ 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33,
+ 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
+ 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79,
+ 0x0c, 0x01, 0x00, 0x00,
+}
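
The hand-rolled Marshal/Unmarshal methods above implement the standard proto3 wire format: each field is a tag byte (the field number shifted left by three, ORed with the wire type, e.g. 0x0a for field 1 with wire type 2) followed by a varint length and the payload. A minimal round-trip sketch (illustrative only; the values are hypothetical):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	in := runtime.PluginSpec{
		Name:   "sample",
		Remote: "example/sample", // hypothetical remote reference
		Privileges: []*runtime.PluginPrivilege{
			{Name: "network", Description: "use the host network", Value: []string{"host"}},
		},
	}
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	var out runtime.PluginSpec
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName(), len(out.GetPrivileges())) // "sample 1"
}
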
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
new file mode 100644
index 000000000..06eb7ba65
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+message PluginSpec {
+ string name = 1;
+ string remote = 2;
+ repeated PluginPrivilege privileges = 3;
+ bool disabled = 4;
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+message PluginPrivilege {
+ string name = 1;
+ string description = 2;
+ repeated string value = 3;
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go
new file mode 100644
index 000000000..f9b1e9266
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go
@@ -0,0 +1,32 @@
+package swarm
+
+import "os"
+
+// Secret represents a secret.
+type Secret struct {
+ ID string
+ Meta
+ Spec SecretSpec
+}
+
+// SecretSpec represents a secret specification from a secret in swarm
+type SecretSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+ Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
+}
+
+// SecretReferenceFileTarget is a file target in a secret reference
+type SecretReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// SecretReference is a reference to a secret in swarm
+type SecretReference struct {
+ File *SecretReferenceFileTarget
+ SecretID string
+ SecretName string
+}
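
A minimal sketch of a SecretReference as a service would consume it (illustrative only; the secret ID is a hypothetical placeholder, and in practice it comes back from the secret-create API call):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	ref := swarm.SecretReference{
		File: &swarm.SecretReferenceFileTarget{
			Name: "db_password", // surfaced to the task as /run/secrets/db_password
			UID:  "0",
			GID:  "0",
			Mode: 0444,
		},
		SecretID:   "hypothetical-secret-id",
		SecretName: "db_password",
	}
	fmt.Println(ref.File.Name, ref.File.Mode) // "db_password -r--r--r--"
}
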
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
new file mode 100644
index 000000000..fa31a7ec8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -0,0 +1,124 @@
+package swarm
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ PreviousSpec *ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+ UpdateStatus *UpdateStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ RollbackConfig *UpdateConfig `json:",omitempty"`
+
+ // Networks field in ServiceSpec is deprecated. The
+ // same field in TaskSpec should be used instead.
+ // This field will be removed in a future release.
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+ // UpdateStateUpdating is the updating state.
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+ // UpdateStateRollbackStarted is the state with a rollback in progress.
+ UpdateStateRollbackStarted UpdateState = "rollback_started"
+ // UpdateStateRollbackPaused is the state with a rollback in progress.
+ UpdateStateRollbackPaused UpdateState = "rollback_paused"
+ // UpdateStateRollbackCompleted is the state with a rollback in progress.
+ UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt *time.Time `json:",omitempty"`
+ CompletedAt *time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+ // UpdateFailureActionRollback ROLLBACK
+ UpdateFailureActionRollback = "rollback"
+
+ // UpdateOrderStopFirst STOP_FIRST
+ UpdateOrderStopFirst = "stop-first"
+ // UpdateOrderStartFirst START_FIRST
+ UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+ // FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ //
+ // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until
+ // another update is started.
+ MaxFailureRatio float32
+
+ // Order indicates the order of operations when rolling out an updated
+ // task. Either the old task is shut down before the new task is
+ // started, or the new task is started before the old task is shut down.
+ Order string
+}
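
A minimal sketch of a ServiceSpec that pairs a replicated mode with an UpdateConfig (illustrative only; Annotations is defined elsewhere in this package, and the service name is hypothetical):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		// Update two tasks at a time, watch each for ten seconds, and
		// pause the rollout on failure.
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   2,
			Delay:         5 * time.Second,
			Monitor:       10 * time.Second,
			FailureAction: swarm.UpdateFailureActionPause,
		},
	}
	fmt.Println(*spec.Mode.Replicated.Replicas, spec.UpdateConfig.FailureAction) // "3 pause"
}
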
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
new file mode 100644
index 000000000..b65fa86da
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -0,0 +1,217 @@
+package swarm
+
+import "time"
+
+// ClusterInfo represents info about the cluster for outputting in "info".
+// It contains the same information as "Swarm", but without the JoinTokens.
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+ TLSInfo TLSInfo
+ RootRotationInProgress bool
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string
+ // Manager is the join token managers may use to join the swarm.
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+ EncryptionConfig EncryptionConfig `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots *uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often an agent should send heartbeats to
+ // the dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for.
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+
+ // SigningCACert and SigningCAKey specify the desired signing root CA and
+ // root CA key for the swarm. When inspecting the cluster, the key will
+ // be redacted.
+ SigningCACert string `json:",omitempty"`
+ SigningCAKey string `json:",omitempty"`
+
+ // If this value changes, and there is no specified signing cert and key,
+ // then the swarm is forced to generate a new root certificate and key.
+ ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ // Protocol is the protocol used by this external CA.
+ Protocol ExternalCAProtocol
+
+ // URL is the URL where the external CA can be reached.
+ URL string
+
+ // Options is a set of additional key/value pairs whose interpretation
+ // depends on the specified CA type.
+ Options map[string]string `json:",omitempty"`
+
+ // CACert specifies which root CA is used by this external CA. This certificate must
+ // be in PEM format.
+ CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ ForceNewCluster bool
+ Spec Spec
+ AutoLockManagers bool
+ Availability NodeAvailability
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ RemoteAddrs []string
+ JoinToken string // the secret token used to authorize the node to join
+ Availability NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+ // LocalNodeStateLocked LOCKED
+ LocalNodeStateLocked LocalNodeState = "locked"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int `json:",omitempty"`
+ Managers int `json:",omitempty"`
+
+ Cluster *ClusterInfo `json:",omitempty"`
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
+type UpdateFlags struct {
+ RotateWorkerToken bool
+ RotateManagerToken bool
+ RotateManagerUnlockKey bool
+}
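
A minimal sketch of the InitRequest a client would send to bootstrap a single-node swarm (illustrative only; the addresses are hypothetical, and NodeAvailabilityActive comes from node.go earlier in this diff):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377",      // default swarm management port
		AdvertiseAddr: "192.168.1.10:2377", // hypothetical reachable address
		Availability:  swarm.NodeAvailabilityActive,
	}
	fmt.Printf("%+v\n", req)
}
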
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
new file mode 100644
index 000000000..ff11b07e7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -0,0 +1,184 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/swarm/runtime"
+)
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+ Annotations
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ // ContainerSpec and PluginSpec are mutually exclusive.
+ // PluginSpec will only be used when the `Runtime` field is set to `plugin`.
+ ContainerSpec *ContainerSpec `json:",omitempty"`
+ PluginSpec *runtime.PluginSpec `json:",omitempty"`
+
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+ // spec. If not present, the cluster default from swarm.Spec will be
+ // used, finally falling back to the engine default if that is also unset.
+ LogDriver *Driver `json:",omitempty"`
+
+ // ForceUpdate is a counter that triggers an update even if no relevant
+ // parameters have been changed.
+ ForceUpdate uint64
+
+ Runtime RuntimeType `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory).
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// GenericResource represents a "user defined" resource which can
+// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
+type GenericResource struct {
+ NamedResourceSpec *NamedGenericResource `json:",omitempty"`
+ DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
+}
+
+// NamedGenericResource represents a "user defined" resource which is defined
+// as a string.
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
+type NamedGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value string `json:",omitempty"`
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+type DiscreteGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resources requirements.
+type ResourceRequirements struct {
+ Limits *Resources `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+ Preferences []PlacementPreference `json:",omitempty"`
+
+ // Platforms stores all the platforms that the image can run on.
+ // This field is used in the platform filter for scheduling. If empty,
+ // then the platform filter is off, meaning there are no scheduling restrictions.
+ Platforms []Platform `json:",omitempty"`
+}
+
+// PlacementPreference provides a way to make the scheduler aware of factors
+// such as topology.
+type PlacementPreference struct {
+ Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+ // label descriptor, such as engine.labels.az
+ SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+ PortStatus PortStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
+
+// PortStatus reports the host ports of a task whose service has
+// published host ports.
+type PortStatus struct {
+ Ports []PortConfig `json:",omitempty"`
+}
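
A minimal sketch of a container-based TaskSpec combining a restart policy with a placement constraint (illustrative only; ContainerSpec is defined in container.go in this package, and the image and constraint expression are hypothetical):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	delay := 5 * time.Second
	maxAttempts := uint64(3)
	spec := swarm.TaskSpec{
		ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
		RestartPolicy: &swarm.RestartPolicy{
			Condition:   swarm.RestartPolicyConditionOnFailure,
			Delay:       &delay,
			MaxAttempts: &maxAttempts,
		},
		Placement: &swarm.Placement{
			Constraints: []string{"node.role == worker"},
		},
	}
	fmt.Println(spec.RestartPolicy.Condition) // "on-failure"
}
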
diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
new file mode 100644
index 000000000..63e1eec19
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time
+
+import (
+ "strconv"
+ "time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+ return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
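
Usage is straightforward; strconv.FormatFloat with precision 0 rounds to the nearest whole second (a minimal sketch, illustrative only):

package main

import (
	"fmt"
	stdtime "time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	fmt.Println(apitime.DurationToSecondsString(90 * stdtime.Second))         // "90"
	fmt.Println(apitime.DurationToSecondsString(1500 * stdtime.Millisecond)) // "2" (rounded)
}
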
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
new file mode 100644
index 000000000..9aa9702da
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -0,0 +1,124 @@
+package time
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string first as a golang duration,
+// then as an RFC3339 time, and finally as a Unix timestamp. If any of
+// these succeeds, it returns a Unix timestamp as a string; otherwise it
+// returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ var parseInLocation bool
+
+ // if the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation
+ parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z), then
+ // there will be an extra colon in the input for the tz offset, so
+ // subtract that colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a unix timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+ }
+ return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format "%d.%09d" (time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it
+// is converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+// if err == nil since := time.Unix(seconds, nanoseconds)
+// If value is empty, it returns def (the default seconds) with zero nanoseconds.
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+ if value == "" {
+ return def, 0, nil
+ }
+ sa := strings.SplitN(value, ".", 2)
+ s, err := strconv.ParseInt(sa[0], 10, 64)
+ if err != nil {
+ return s, 0, err
+ }
+ if len(sa) != 2 {
+ return s, 0, nil
+ }
+ n, err := strconv.ParseInt(sa[1], 10, 64)
+ if err != nil {
+ return s, n, err
+ }
+ // should already be in nanoseconds but just in case convert n to nanoseconds
+ n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+ return s, n, nil
+}
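
A minimal usage sketch (illustrative only): a relative duration resolves against the reference time, and the "seconds.nanoseconds" output format round-trips through ParseTimestamps.

package main

import (
	"fmt"
	stdtime "time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	ref := stdtime.Date(2017, 11, 1, 12, 0, 0, 0, stdtime.UTC)

	// Duration input: ten minutes before the reference time, as Unix seconds.
	ts, err := apitime.GetTimestamp("10m", ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts) // "1509537000"

	// Round-trip a seconds.nanoseconds timestamp.
	secs, nanos, err := apitime.ParseTimestamps("1136073600.000000001", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(secs, nanos) // "1136073600 1"
}
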
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
new file mode 100644
index 000000000..f7ac77297
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -0,0 +1,575 @@
+package types
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/go-connections/nat"
+)
+
+// RootFS holds an image's RootFS description, including the layer IDs.
+type RootFS struct {
+ Type string
+ Layers []string `json:",omitempty"`
+ BaseLayer string `json:",omitempty"`
+}
+
+// ImageInspect contains response of Engine API:
+// GET "/images/{name:.*}/json"
+type ImageInspect struct {
+ ID string `json:"Id"`
+ RepoTags []string
+ RepoDigests []string
+ Parent string
+ Comment string
+ Created string
+ Container string
+ ContainerConfig *container.Config
+ DockerVersion string
+ Author string
+ Config *container.Config
+ Architecture string
+ Os string
+ OsVersion string `json:",omitempty"`
+ Size int64
+ VirtualSize int64
+ GraphDriver GraphDriverData
+ RootFS RootFS
+ Metadata ImageMetadata
+}
+
+// ImageMetadata contains engine-local data about the image
+type ImageMetadata struct {
+ LastTagTime time.Time `json:",omitempty"`
+}
+
+// Container contains response of Engine API:
+// GET "/containers/json"
+type Container struct {
+ ID string `json:"Id"`
+ Names []string
+ Image string
+ ImageID string
+ Command string
+ Created int64
+ Ports []Port
+ SizeRw int64 `json:",omitempty"`
+ SizeRootFs int64 `json:",omitempty"`
+ Labels map[string]string
+ State string
+ Status string
+ HostConfig struct {
+ NetworkMode string `json:",omitempty"`
+ }
+ NetworkSettings *SummaryNetworkSettings
+ Mounts []MountPoint
+}
+
+// CopyConfig contains request body of Engine API:
+// POST "/containers/"+containerID+"/copy"
+type CopyConfig struct {
+ Resource string
+}
+
+// ContainerPathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive"
+// "Name" is the file or directory name.
+type ContainerPathStat struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ Mtime time.Time `json:"mtime"`
+ LinkTarget string `json:"linkTarget"`
+}
+
+// ContainerStats contains response of Engine API:
+// GET "/stats"
+type ContainerStats struct {
+ Body io.ReadCloser `json:"body"`
+ OSType string `json:"ostype"`
+}
+
+// Ping contains response of Engine API:
+// GET "/_ping"
+type Ping struct {
+ APIVersion string
+ OSType string
+ Experimental bool
+}
+
+// Version contains response of Engine API:
+// GET "/version"
+type Version struct {
+ Version string
+ APIVersion string `json:"ApiVersion"`
+ MinAPIVersion string `json:"MinAPIVersion,omitempty"`
+ GitCommit string
+ GoVersion string
+ Os string
+ Arch string
+ KernelVersion string `json:",omitempty"`
+ Experimental bool `json:",omitempty"`
+ BuildTime string `json:",omitempty"`
+}
+
+// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
+// in the version-string of external tools, such as containerd or runC.
+type Commit struct {
+ ID string // ID is the actual commit ID of external tool.
+ Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
+}
+
+// Info contains response of Engine API:
+// GET "/info"
+type Info struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ NFd int
+ OomKillDisable bool
+ NGoroutines int
+ SystemTime string
+ LoggingDriver string
+ CgroupDriver string
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ RegistryConfig *registry.ServiceConfig
+ NCPU int
+ MemTotal int64
+ GenericResources []swarm.GenericResource
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ExperimentalBuild bool
+ ServerVersion string
+ ClusterStore string
+ ClusterAdvertise string
+ Runtimes map[string]Runtime
+ DefaultRuntime string
+ Swarm swarm.Info
+ // LiveRestoreEnabled determines whether containers should be kept
+ // running when the daemon is shutdown or upon daemon start if
+ // running containers are detected
+ LiveRestoreEnabled bool
+ Isolation container.Isolation
+ InitBinary string
+ ContainerdCommit Commit
+ RuncCommit Commit
+ InitCommit Commit
+ SecurityOptions []string
+}
+
+// KeyValue holds a key/value pair
+type KeyValue struct {
+ Key, Value string
+}
+
+// SecurityOpt contains the name and options of a security option
+type SecurityOpt struct {
+ Name string
+ Options []KeyValue
+}
+
+// DecodeSecurityOptions decodes a security options string slice to a
+// type-safe SecurityOpt
+func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
+ so := []SecurityOpt{}
+ for _, opt := range opts {
+ // support output from a < 1.13 docker daemon
+ if !strings.Contains(opt, "=") {
+ so = append(so, SecurityOpt{Name: opt})
+ continue
+ }
+ secopt := SecurityOpt{}
+ split := strings.Split(opt, ",")
+ for _, s := range split {
+ kv := strings.SplitN(s, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("invalid security option %q", s)
+ }
+ if kv[0] == "" || kv[1] == "" {
+ return nil, errors.New("invalid empty security option")
+ }
+ if kv[0] == "name" {
+ secopt.Name = kv[1]
+ continue
+ }
+ secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
+ }
+ so = append(so, secopt)
+ }
+ return so, nil
+}
+
+// PluginsInfo is a temp struct holding the names of the plugins registered
+// with the docker daemon. It is used by the Info struct
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+ // List of Log plugins registered
+ Log []string
+}
+
+// ExecStartCheck is a temp struct used by execStart
+// Config fields are part of ExecConfig in the runconfig package
+type ExecStartCheck struct {
+ // ExecStart will first check if it's detached
+ Detach bool
+ // Check if there's a tty
+ Tty bool
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+ Start time.Time // Start is the time this check started
+ End time.Time // End is the time this check ended
+ ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
+ Output string // Output from last check
+}
+
+// Health states
+const (
+ NoHealthcheck = "none" // Indicates there is no healthcheck
+ Starting = "starting" // Starting indicates that the container is not yet ready
+ Healthy = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+ Status string // Status is one of Starting, Healthy or Unhealthy
+ FailingStreak int // FailingStreak is the number of consecutive failures
+ Log []*HealthcheckResult // Log contains the last few results (oldest first)
+}
+
+// ContainerState stores a container's running state.
+// It's part of ContainerJSONBase and is returned by the "inspect" command
+type ContainerState struct {
+ Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
+ Running bool
+ Paused bool
+ Restarting bool
+ OOMKilled bool
+ Dead bool
+ Pid int
+ ExitCode int
+ Error string
+ StartedAt string
+ FinishedAt string
+ Health *Health `json:",omitempty"`
+}
+
+// ContainerNode stores information about the node that a container
+// is running on. It's only available in Docker Swarm
+type ContainerNode struct {
+ ID string
+ IPAddress string `json:"IP"`
+ Addr string
+ Name string
+ Cpus int
+ Memory int64
+ Labels map[string]string
+}
+
+// ContainerJSONBase contains response of Engine API:
+// GET "/containers/{name:.*}/json"
+type ContainerJSONBase struct {
+ ID string `json:"Id"`
+ Created string
+ Path string
+ Args []string
+ State *ContainerState
+ Image string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Node *ContainerNode `json:",omitempty"`
+ Name string
+ RestartCount int
+ Driver string
+ Platform string
+ MountLabel string
+ ProcessLabel string
+ AppArmorProfile string
+ ExecIDs []string
+ HostConfig *container.HostConfig
+ GraphDriver GraphDriverData
+ SizeRw *int64 `json:",omitempty"`
+ SizeRootFs *int64 `json:",omitempty"`
+}
+
+// ContainerJSON is a newer struct used along with MountPoint
+type ContainerJSON struct {
+ *ContainerJSONBase
+ Mounts []MountPoint
+ Config *container.Config
+ NetworkSettings *NetworkSettings
+}
+
+// NetworkSettings exposes the network settings in the api
+type NetworkSettings struct {
+ NetworkSettingsBase
+ DefaultNetworkSettings
+ Networks map[string]*network.EndpointSettings
+}
+
+// SummaryNetworkSettings provides a summary of a container's networks
+// in /containers/json
+type SummaryNetworkSettings struct {
+ Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds basic information about networks
+type NetworkSettingsBase struct {
+ Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`)
+ SandboxID string // SandboxID uniquely represents a container's network stack
+ HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
+ LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
+ Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
+ SandboxKey string // SandboxKey identifies the sandbox
+ SecondaryIPAddresses []network.Address
+ SecondaryIPv6Addresses []network.Address
+}
+
+// DefaultNetworkSettings holds network information
+// during the 2 release deprecation period.
+// It will be removed in Docker 1.11.
+type DefaultNetworkSettings struct {
+ EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
+ Gateway string // Gateway holds the gateway address for the network
+ GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
+ GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
+ IPAddress string // IPAddress holds the IPv4 address for the network
+ IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
+ IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
+ MacAddress string // MacAddress holds the MAC address for the network
+}
+
+// MountPoint represents a mount point configuration inside the container.
+// This is used for reporting the mountpoints in use by a container.
+type MountPoint struct {
+ Type mount.Type `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Source string
+ Destination string
+ Driver string `json:",omitempty"`
+ Mode string
+ RW bool
+ Propagation mount.Propagation
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+ Name string // Name is the requested name of the network
+ ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+ Created time.Time // Created is the time the network was created
+ Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
+ Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+ EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
+ IPAM network.IPAM // IPAM is the network's IP Address Management
+ Internal bool // Internal represents if the network is used internally only
+ Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+ Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+ ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
+ ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+ Options map[string]string // Options holds the network specific options to use for when creating the network
+ Labels map[string]string // Labels holds metadata specific to the network being created
+ Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
+ Services map[string]network.ServiceInfo `json:",omitempty"`
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+ Name string
+ EndpointID string
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+ // Check for networks with duplicate names.
+ // Network is primarily keyed on a random ID rather than on the name,
+ // and the network name is strictly a user-friendly alias to the network,
+ // which is uniquely identified by that ID; there is therefore no
+ // guaranteed way to check for duplicates. The CheckDuplicate option
+ // provides a best-effort check for networks with the same name, but it
+ // is not guaranteed to catch all name collisions.
+ CheckDuplicate bool
+ Driver string
+ Scope string
+ EnableIPv6 bool
+ IPAM *network.IPAM
+ Internal bool
+ Attachable bool
+ Ingress bool
+ ConfigOnly bool
+ ConfigFrom *network.ConfigReference
+ Options map[string]string
+ Labels map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for network create call.
+type NetworkCreateRequest struct {
+ NetworkCreate
+ Name string
+}
+
+// NetworkCreateResponse is the response message sent by the server for network create call
+type NetworkCreateResponse struct {
+ ID string `json:"Id"`
+ Warning string
+}
+
+// NetworkConnect represents the data to be used to connect a container to the network
+type NetworkConnect struct {
+ Container string
+ EndpointConfig *network.EndpointSettings `json:",omitempty"`
+}
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+type NetworkDisconnect struct {
+ Container string
+ Force bool
+}
+
+// NetworkInspectOptions holds parameters to inspect network
+type NetworkInspectOptions struct {
+ Scope string
+ Verbose bool
+}
+
+// Checkpoint represents the details of a checkpoint
+type Checkpoint struct {
+ Name string // Name is the name of the checkpoint
+}
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+ Path string `json:"path"`
+ Args []string `json:"runtimeArgs,omitempty"`
+}
+
+// DiskUsage contains response of Engine API:
+// GET "/system/df"
+type DiskUsage struct {
+ LayersSize int64
+ Images []*ImageSummary
+ Containers []*Container
+ Volumes []*Volume
+ BuilderSize int64
+}
+
+// ContainersPruneReport contains the response for Engine API:
+// POST "/containers/prune"
+type ContainersPruneReport struct {
+ ContainersDeleted []string
+ SpaceReclaimed uint64
+}
+
+// VolumesPruneReport contains the response for Engine API:
+// POST "/volumes/prune"
+type VolumesPruneReport struct {
+ VolumesDeleted []string
+ SpaceReclaimed uint64
+}
+
+// ImagesPruneReport contains the response for Engine API:
+// POST "/images/prune"
+type ImagesPruneReport struct {
+ ImagesDeleted []ImageDeleteResponseItem
+ SpaceReclaimed uint64
+}
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type BuildCachePruneReport struct {
+ SpaceReclaimed uint64
+}
+
+// NetworksPruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type NetworksPruneReport struct {
+ NetworksDeleted []string
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+ // ID is the id of the created secret.
+ ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+ Filters filters.Args
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+ // ID is the id of the created config.
+ ID string
+}
+
+// ConfigListOptions holds parameters to list configs
+type ConfigListOptions struct {
+ Filters filters.Args
+}
+
+// PushResult contains the tag, manifest digest, and manifest size from the
+// push. It's used to signal this information to the trust code in the client
+// so it can sign the manifest if necessary.
+type PushResult struct {
+ Tag string
+ Digest string
+ Size int
+}
+
+// BuildResult contains the image id of a successful build
+type BuildResult struct {
+ ID string
+}
diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md
new file mode 100644
index 000000000..1ef911edb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/README.md
@@ -0,0 +1,14 @@
+# Legacy API type versions
+
+This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions at or below `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code would be valid, but it arguably looks more awkward: `v1_20.CallFunction`.
+
+For instance, if you need to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
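+
+For example (an illustrative import path following this convention):
+
+```go
+import "github.com/docker/docker/api/types/versions/v1p21"
+```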
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
new file mode 100644
index 000000000..611d4fed6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+ "strconv"
+ "strings"
+)
+
+// compare compares two version strings
+// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
+func compare(v1, v2 string) int {
+ var (
+ currTab = strings.Split(v1, ".")
+ otherTab = strings.Split(v2, ".")
+ )
+
+ max := len(currTab)
+ if len(otherTab) > max {
+ max = len(otherTab)
+ }
+ for i := 0; i < max; i++ {
+ var currInt, otherInt int
+
+ if len(currTab) > i {
+ currInt, _ = strconv.Atoi(currTab[i])
+ }
+ if len(otherTab) > i {
+ otherInt, _ = strconv.Atoi(otherTab[i])
+ }
+ if currInt > otherInt {
+ return 1
+ }
+ if otherInt > currInt {
+ return -1
+ }
+ }
+ return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+ return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+ return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+ return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+ return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+ return compare(v, other) == 0
+}
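+
+// A minimal usage sketch (illustrative; clientVersion is a placeholder
+// variable, not part of this package):
+//
+//    if versions.LessThan(clientVersion, "1.25") {
+//        // fall back to behavior supported by older daemons
+//    }
+//
+// Note that each dot-separated component is compared numerically, so
+// "1.10" sorts above "1.9".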
diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go
new file mode 100644
index 000000000..b5ee96a50
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume.go
@@ -0,0 +1,69 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Volume volume
+// swagger:model Volume
+type Volume struct {
+
+ // Date/Time the volume was created.
+ CreatedAt string `json:"CreatedAt,omitempty"`
+
+ // Name of the volume driver used by the volume.
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // Mount path of the volume on the host.
+ // Required: true
+ Mountpoint string `json:"Mountpoint"`
+
+ // Name of the volume.
+ // Required: true
+ Name string `json:"Name"`
+
+ // The driver specific options used when creating the volume.
+ // Required: true
+ Options map[string]string `json:"Options"`
+
+ // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
+ // Required: true
+ Scope string `json:"Scope"`
+
+ // Low-level details about the volume, provided by the volume driver.
+ // Details are returned as a map with key/value pairs:
+ // `{"key":"value","key2":"value2"}`.
+ //
+ // The `Status` field is optional, and is omitted if the volume driver
+ // does not support this feature.
+ //
+ Status map[string]interface{} `json:"Status,omitempty"`
+
+ // usage data
+ UsageData *VolumeUsageData `json:"UsageData,omitempty"`
+}
+
+// VolumeUsageData Usage details about the volume. This information is used by the
+// `GET /system/df` endpoint, and omitted in other endpoints.
+//
+// swagger:model VolumeUsageData
+type VolumeUsageData struct {
+
+ // The number of containers referencing this volume. This field
+ // is set to `-1` if the reference-count is not available.
+ //
+ // Required: true
+ RefCount int64 `json:"RefCount"`
+
+ // Amount of disk space used by the volume (in bytes). This information
+ // is only available for volumes created with the `"local"` volume
+ // driver. For volumes created with other volume drivers, this field
+ // is set to `-1` ("not available")
+ //
+ // Required: true
+ Size int64 `json:"Size"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go
new file mode 100644
index 000000000..9f70e43ca
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go
@@ -0,0 +1,29 @@
+package volume
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// VolumesCreateBody volumes create body
+// swagger:model VolumesCreateBody
+type VolumesCreateBody struct {
+
+ // Name of the volume driver to use.
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
+ // Required: true
+ DriverOpts map[string]string `json:"DriverOpts"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // The new volume's name. If not specified, Docker generates a name.
+ // Required: true
+ Name string `json:"Name"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go
new file mode 100644
index 000000000..833dad933
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go
@@ -0,0 +1,23 @@
+package volume
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+import "github.com/docker/docker/api/types"
+
+// VolumesListOKBody volumes list o k body
+// swagger:model VolumesListOKBody
+type VolumesListOKBody struct {
+
+ // List of volumes
+ // Required: true
+ Volumes []*types.Volume `json:"Volumes"`
+
+ // Warnings that occurred when fetching the list of volumes
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md
new file mode 100644
index 000000000..059dfb3ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/README.md
@@ -0,0 +1,35 @@
+# Go client for the Docker Engine API
+
+The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc.
+
+For example, to list running containers (the equivalent of `docker ps`):
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+)
+
+func main() {
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ panic(err)
+ }
+
+ containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, container := range containers {
+ fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+ }
+}
+```
+
+[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client)
diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go
new file mode 100644
index 000000000..ccab115d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/build_prune.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// BuildCachePrune requests the daemon to delete unused cache data
+func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) {
+ if err := cli.NewVersionError("1.31", "build prune"); err != nil {
+ return nil, err
+ }
+
+ report := types.BuildCachePruneReport{}
+
+ serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+		return nil, fmt.Errorf("Error retrieving build cache prune report: %v", err)
+ }
+
+ return &report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go
new file mode 100644
index 000000000..0effe498b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_create.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointCreate creates a checkpoint from the given container with the given name
+func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
+ resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go
new file mode 100644
index 000000000..e6e75588b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointDelete deletes the checkpoint with the given name from the given container
+func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error {
+ query := url.Values{}
+ if options.CheckpointDir != "" {
+ query.Set("dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go
new file mode 100644
index 000000000..ffe44bc97
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointList returns the checkpoints of the given container in the docker host
+func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
+ var checkpoints []types.Checkpoint
+
+ query := url.Values{}
+ if options.CheckpointDir != "" {
+ query.Set("dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return checkpoints, containerNotFoundError{container}
+ }
+ return checkpoints, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&checkpoints)
+ ensureReaderClosed(resp)
+ return checkpoints, err
+}
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
new file mode 100644
index 000000000..c4e3914b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -0,0 +1,314 @@
+/*
+Package client is a Go client for the Docker Engine API.
+
+The "docker" command uses this package to communicate with the daemon. It can also
+be used by your own Go applications to do anything the command-line interface does
+- running containers, pulling images, managing swarms, etc.
+
+For more information about the Engine API, see the documentation:
+https://docs.docker.com/engine/reference/api/
+
+Usage
+
+You use the library by creating a client object and calling methods on it. The
+client can be created either from environment variables with NewEnvClient, or
+configured manually with NewClient.
+
+For example, to list running containers (the equivalent of "docker ps"):
+
+ package main
+
+ import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+ )
+
+ func main() {
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ panic(err)
+ }
+
+ containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, container := range containers {
+ fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+ }
+ }
+
+*/
+package client
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/api"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/go-connections/sockets"
+ "github.com/docker/go-connections/tlsconfig"
+ "golang.org/x/net/context"
+)
+
+// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+ // scheme sets the scheme for the client
+ scheme string
+ // host holds the server address to connect to
+ host string
+ // proto holds the client protocol i.e. unix.
+ proto string
+ // addr holds the client address.
+ addr string
+ // basePath holds the path to prepend to the requests.
+ basePath string
+ // client used to send and receive http requests.
+ client *http.Client
+ // version of the server to talk to.
+ version string
+ // custom http headers configured by users.
+ customHTTPHeaders map[string]string
+ // manualOverride is set to true when the version was set by users.
+ manualOverride bool
+}
+
+// CheckRedirect specifies the policy for dealing with redirect responses:
+// if the request is non-GET, return `ErrRedirect`; otherwise, use the last response.
+//
+// Go 1.8 changed the behavior of HTTP redirects (specifically 301, 307, and 308) in the client.
+// The Docker client (and by extension the Docker API client) can be made to send a request
+// like POST /containers//start where what would normally be in the name section of the URL is empty.
+// This triggers an HTTP 301 from the daemon.
+// In Go 1.8 this 301 is converted to a GET request, which ends up getting a 404 from the daemon.
+// This behavior change manifests in the client: previously the 301 was not followed and
+// the client did not generate an error, but it now results in a message like `Error response from daemon: page not found`.
+func CheckRedirect(req *http.Request, via []*http.Request) error {
+ if via[0].Method == http.MethodGet {
+ return http.ErrUseLastResponse
+ }
+ return ErrRedirect
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the url to the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// Use DOCKER_CERT_PATH to load the TLS certificates from.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
+func NewEnvClient() (*Client, error) {
+ var client *http.Client
+ if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+ options := tlsconfig.Options{
+ CAFile: filepath.Join(dockerCertPath, "ca.pem"),
+ CertFile: filepath.Join(dockerCertPath, "cert.pem"),
+ KeyFile: filepath.Join(dockerCertPath, "key.pem"),
+ InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+ }
+ tlsc, err := tlsconfig.Client(options)
+ if err != nil {
+ return nil, err
+ }
+
+ client = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsc,
+ },
+ CheckRedirect: CheckRedirect,
+ }
+ }
+
+ host := os.Getenv("DOCKER_HOST")
+ if host == "" {
+ host = DefaultDockerHost
+ }
+ version := os.Getenv("DOCKER_API_VERSION")
+ if version == "" {
+ version = api.DefaultVersion
+ }
+
+ cli, err := NewClient(host, version, client, nil)
+ if err != nil {
+ return cli, err
+ }
+ if os.Getenv("DOCKER_API_VERSION") != "" {
+ cli.manualOverride = true
+ }
+ return cli, nil
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+ proto, addr, basePath, err := ParseHost(host)
+ if err != nil {
+ return nil, err
+ }
+
+ if client != nil {
+ if _, ok := client.Transport.(http.RoundTripper); !ok {
+ return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
+ }
+ } else {
+ transport := new(http.Transport)
+ sockets.ConfigureTransport(transport, proto, addr)
+ client = &http.Client{
+ Transport: transport,
+ CheckRedirect: CheckRedirect,
+ }
+ }
+
+ scheme := "http"
+ tlsConfig := resolveTLSConfig(client.Transport)
+ if tlsConfig != nil {
+ // TODO(stevvooe): This isn't really the right way to write clients in Go.
+ // `NewClient` should probably only take an `*http.Client` and work from there.
+ // Unfortunately, the model of having a host-ish/url-thingy as the connection
+ // string has us confusing protocol and transport layers. We continue doing
+ // this to avoid breaking existing clients but this should be addressed.
+ scheme = "https"
+ }
+
+ return &Client{
+ scheme: scheme,
+ host: host,
+ proto: proto,
+ addr: addr,
+ basePath: basePath,
+ client: client,
+ version: version,
+ customHTTPHeaders: httpHeaders,
+ }, nil
+}
+
+// Close closes the client's transport, which is needed in particular when
+// NewClient was called with a nil *http.Client, for example:
+//
+//    client.NewClient("unix:///var/run/docker.sock", "v1.18", nil, map[string]string{"User-Agent": "engine-api-cli-1.0"})
+func (cli *Client) Close() error {
+
+ if t, ok := cli.client.Transport.(*http.Transport); ok {
+ t.CloseIdleConnections()
+ }
+
+ return nil
+}
+
+// getAPIPath returns the versioned request path to call the API.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+ var apiPath string
+ if cli.version != "" {
+ v := strings.TrimPrefix(cli.version, "v")
+ apiPath = cli.basePath + "/v" + v + p
+ } else {
+ apiPath = cli.basePath + p
+ }
+
+ u := &url.URL{
+ Path: apiPath,
+ }
+ if len(query) > 0 {
+ u.RawQuery = query.Encode()
+ }
+ return u.String()
+}
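+
+// For example (illustrative), with version "1.25", an empty basePath, and a
+// query of "all=1", getAPIPath("/containers/json", query) returns
+// "/v1.25/containers/json?all=1".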
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+// This operation doesn't acquire a mutex.
+func (cli *Client) ClientVersion() string {
+ return cli.version
+}
+
+// NegotiateAPIVersion updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
+ ping, _ := cli.Ping(ctx)
+ cli.NegotiateAPIVersionPing(ping)
+}
+
+// NegotiateAPIVersionPing updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
+ if cli.manualOverride {
+ return
+ }
+
+	// if the ping does not report an API version, assume the latest version before versioning headers existed
+ if p.APIVersion == "" {
+ p.APIVersion = "1.24"
+ }
+
+ // if the client is not initialized with a version, start with the latest supported version
+ if cli.version == "" {
+ cli.version = api.DefaultVersion
+ }
+
+ // if server version is lower than the maximum version supported by the Client, downgrade
+ if versions.LessThan(p.APIVersion, api.DefaultVersion) {
+ cli.version = p.APIVersion
+ }
+}
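+
+// A typical sequence (a sketch, assuming a reachable daemon): create a
+// client, then negotiate down to the version the server supports before
+// issuing further calls.
+//
+//    cli, err := NewEnvClient()
+//    if err != nil {
+//        panic(err)
+//    }
+//    cli.NegotiateAPIVersion(context.Background())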
+
+// DaemonHost returns the host associated with this instance of the Client.
+// This operation doesn't acquire a mutex.
+func (cli *Client) DaemonHost() string {
+ return cli.host
+}
+
+// ParseHost parses the given host string into its protocol, address, and
+// base path components, verifying that it is valid.
+func ParseHost(host string) (string, string, string, error) {
+ protoAddrParts := strings.SplitN(host, "://", 2)
+ if len(protoAddrParts) == 1 {
+ return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+ }
+
+ var basePath string
+ proto, addr := protoAddrParts[0], protoAddrParts[1]
+ if proto == "tcp" {
+ parsed, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", "", "", err
+ }
+ addr = parsed.Host
+ basePath = parsed.Path
+ }
+ return proto, addr, basePath, nil
+}
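+
+// For example (illustrative):
+//
+//    proto, addr, basePath, err := ParseHost("tcp://127.0.0.1:2375")
+//    // proto == "tcp", addr == "127.0.0.1:2375", basePath == "", err == nil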
+
+// CustomHTTPHeaders returns the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) CustomHTTPHeaders() map[string]string {
+ m := make(map[string]string)
+ for k, v := range cli.customHTTPHeaders {
+ m[k] = v
+ }
+ return m
+}
+
+// SetCustomHTTPHeaders updates the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
+ cli.customHTTPHeaders = headers
+}
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
new file mode 100644
index 000000000..89de892c8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd darwin
+
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go
new file mode 100644
index 000000000..07c0c7a77
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
new file mode 100644
index 000000000..bc4a952b2
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigCreate creates a new Config.
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
+ var response types.ConfigCreateResponse
+ if err := cli.NewVersionError("1.30", "config create"); err != nil {
+ return response, err
+ }
+ resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
new file mode 100644
index 000000000..ebb6d636c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_inspect.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigInspectWithRaw returns the config information with raw data
+func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
+ if err := cli.NewVersionError("1.30", "config inspect"); err != nil {
+ return swarm.Config{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return swarm.Config{}, nil, configNotFoundError{id}
+ }
+ return swarm.Config{}, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return swarm.Config{}, nil, err
+ }
+
+ var config swarm.Config
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&config)
+
+ return config, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
new file mode 100644
index 000000000..8483ca14d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigList returns the list of configs.
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
+ if err := cli.NewVersionError("1.30", "config list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/configs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var configs []swarm.Config
+ err = json.NewDecoder(resp.body).Decode(&configs)
+ ensureReaderClosed(resp)
+ return configs, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
new file mode 100644
index 000000000..726b5c853
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_remove.go
@@ -0,0 +1,13 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ConfigRemove removes a Config.
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
+ if err := cli.NewVersionError("1.30", "config remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
new file mode 100644
index 000000000..823751bb8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_update.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigUpdate attempts to update a Config
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+ if err := cli.NewVersionError("1.30", "config update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
new file mode 100644
index 000000000..0fdf3ed0c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_attach.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+ query := url.Values{}
+ if options.Stream {
+ query.Set("stream", "1")
+ }
+ if options.Stdin {
+ query.Set("stdin", "1")
+ }
+ if options.Stdout {
+ query.Set("stdout", "1")
+ }
+ if options.Stderr {
+ query.Set("stderr", "1")
+ }
+ if options.DetachKeys != "" {
+ query.Set("detachKeys", options.DetachKeys)
+ }
+ if options.Logs {
+ query.Set("logs", "1")
+ }
+
+ headers := map[string][]string{"Content-Type": {"text/plain"}}
+ return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
+}
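+
+// A minimal demultiplexing sketch (illustrative), assuming resp is the
+// types.HijackedResponse returned above for a non-TTY container:
+//
+//    // StdCopy splits the multiplexed stream described above into
+//    // separate stdout and stderr writers.
+//    _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)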
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
new file mode 100644
index 000000000..531d796ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_commit.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerCommit applies changes into a container and creates a new tagged image.
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) {
+ var repository, tag string
+ if options.Reference != "" {
+ ref, err := reference.ParseNormalizedNamed(options.Reference)
+ if err != nil {
+ return types.IDResponse{}, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
+ }
+ ref = reference.TagNameOnly(ref)
+
+ if tagged, ok := ref.(reference.Tagged); ok {
+ tag = tagged.Tag()
+ }
+ repository = reference.FamiliarName(ref)
+ }
+
+ query := url.Values{}
+ query.Set("container", container)
+ query.Set("repo", repository)
+ query.Set("tag", tag)
+ query.Set("comment", options.Comment)
+ query.Set("author", options.Author)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+	if !options.Pause {
+ query.Set("pause", "0")
+ }
+
+ var response types.IDResponse
+ resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go
new file mode 100644
index 000000000..30ba6803f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_copy.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+ urlStr := "/containers/" + containerID + "/archive"
+ response, err := cli.head(ctx, urlStr, query, nil)
+ if err != nil {
+ return types.ContainerPathStat{}, err
+ }
+ defer ensureReaderClosed(response)
+ return getContainerPathStatFromHeader(response.header)
+}
+
+// CopyToContainer copies content into the container filesystem.
+// Note that `content` must be a Reader for a TAR archive
+func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+ // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+ if !options.AllowOverwriteDirWithFile {
+ query.Set("noOverwriteDirNonDir", "true")
+ }
+
+ if options.CopyUIDGID {
+ query.Set("copyUIDGID", "true")
+ }
+
+ apiPath := "/containers/" + container + "/archive"
+
+ response, err := cli.putRaw(ctx, apiPath, query, content, nil)
+ if err != nil {
+ return err
+ }
+ defer ensureReaderClosed(response)
+
+ if response.statusCode != http.StatusOK {
+ return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ return nil
+}
+
+// CopyFromContainer gets the content from the container and returns it as a Reader
+// to manipulate it in the host. It's up to the caller to close the reader.
+func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
+ query := make(url.Values, 1)
+ query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
+
+ apiPath := "/containers/" + container + "/archive"
+ response, err := cli.get(ctx, apiPath, query, nil)
+ if err != nil {
+ return nil, types.ContainerPathStat{}, err
+ }
+
+ if response.statusCode != http.StatusOK {
+ return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ // In order to get the copy behavior right, we need to know information
+ // about both the source and the destination. The response headers include
+ // stat info about the source that we can use in deciding exactly how to
+ // copy it locally. Along with the stat info about the local destination,
+ // we have everything we need to handle the multiple possibilities there
+ // can be when copying a file/dir from one location to another file/dir.
+ stat, err := getContainerPathStatFromHeader(response.header)
+ if err != nil {
+ return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+ }
+ return response.body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
+ var stat types.ContainerPathStat
+
+ encodedStat := header.Get("X-Docker-Container-Path-Stat")
+ statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+ err := json.NewDecoder(statDecoder).Decode(&stat)
+ if err != nil {
+ err = fmt.Errorf("unable to decode container path stat header: %s", err)
+ }
+
+ return stat, err
+}
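+
+// A minimal usage sketch (illustrative), assuming a Client cli, a context
+// ctx, and a container ID containerID: copy a single file out of a
+// container. The response body is always a TAR stream, even for a single
+// file, so it must be unpacked with archive/tar.
+//
+//    rc, stat, err := cli.CopyFromContainer(ctx, containerID, "/etc/hostname")
+//    if err != nil {
+//        panic(err)
+//    }
+//    defer rc.Close()
+//    tr := tar.NewReader(rc)
+//    hdr, err := tr.Next() // hdr.Name and stat describe the copied resource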
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
new file mode 100644
index 000000000..6841b0b28
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+type configWrapper struct {
+ *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based in the given configuration.
+// It can be associated with a name, but it's not mandatory.
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
+ var response container.ContainerCreateCreatedBody
+
+ if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
+ return response, err
+ }
+
+ // When using API 1.24 and under, the client is responsible for removing the container
+ if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
+ hostConfig.AutoRemove = false
+ }
+
+ query := url.Values{}
+ if containerName != "" {
+ query.Set("name", containerName)
+ }
+
+ body := configWrapper{
+ Config: config,
+ HostConfig: hostConfig,
+ NetworkingConfig: networkingConfig,
+ }
+
+ serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+ if err != nil {
+ if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+ return response, imageNotFoundError{config.Image}
+ }
+ return response, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
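+
+// A minimal usage sketch (illustrative; the image, command, and empty name
+// are examples only, and cli and ctx are assumed to exist):
+//
+//    body, err := cli.ContainerCreate(ctx,
+//        &container.Config{Image: "alpine", Cmd: []string{"echo", "hello"}},
+//        nil, nil, "")
+//    // body.ID holds the ID of the created container.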
diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go
new file mode 100644
index 000000000..884dc9fee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_diff.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
+ var changes []container.ContainerChangeResponseItem
+
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+ if err != nil {
+ return changes, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&changes)
+ ensureReaderClosed(serverResp)
+ return changes, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
new file mode 100644
index 000000000..0665c54fb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_exec.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+ var response types.IDResponse
+
+ if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
+ return response, err
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+ if err != nil {
+ return response, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+ resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+ headers := map[string][]string{"Content-Type": {"application/json"}}
+ return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+ var response types.ContainerExecInspect
+ resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go
new file mode 100644
index 000000000..52194f3d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_export.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return serverResp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go
new file mode 100644
index 000000000..17f180974
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_inspect.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, err
+ }
+
+ var response types.ContainerJSON
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
+
+// ContainerInspectWithRaw returns the container information and its raw representation.
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
+ query := url.Values{}
+ if getSize {
+ query.Set("size", "1")
+ }
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ContainerJSON{}, nil, err
+ }
+
+ var response types.ContainerJSON
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go
new file mode 100644
index 000000000..29f80c73a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_kill.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
+ query := url.Values{}
+ query.Set("signal", signal)
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go
new file mode 100644
index 000000000..439891219
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_list.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ContainerList returns the list of containers in the docker host.
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ query := url.Values{}
+
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ if options.Limit != -1 {
+ query.Set("limit", strconv.Itoa(options.Limit))
+ }
+
+ if options.Since != "" {
+ query.Set("since", options.Since)
+ }
+
+ if options.Before != "" {
+ query.Set("before", options.Before)
+ }
+
+ if options.Size {
+ query.Set("size", "1")
+ }
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/containers/json", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var containers []types.Container
+ err = json.NewDecoder(resp.body).Decode(&containers)
+ ensureReaderClosed(resp)
+ return containers, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go
new file mode 100644
index 000000000..0f32e9f12
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_logs.go
@@ -0,0 +1,72 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
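+
+// A minimal usage sketch (illustrative), assuming a Client cli, a context
+// ctx, and a container ID containerID: fetch the last 100 log lines of a
+// non-TTY container and demultiplex them with stdcopy.StdCopy.
+//
+//    rc, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
+//        ShowStdout: true,
+//        ShowStderr: true,
+//        Tail:       "100",
+//    })
+//    if err != nil {
+//        panic(err)
+//    }
+//    defer rc.Close()
+//    stdcopy.StdCopy(os.Stdout, os.Stderr, rc)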
diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go
new file mode 100644
index 000000000..412067a78
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_pause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerPause pauses the main process of a given container without terminating it.
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go
new file mode 100644
index 000000000..b58217086
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ContainersPrune requests the daemon to delete unused data
+func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
+ var report types.ContainersPruneReport
+
+ if err := cli.NewVersionError("1.25", "container prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+		return report, fmt.Errorf("Error retrieving containers prune report: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go
new file mode 100644
index 000000000..3a79590ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_remove.go
@@ -0,0 +1,27 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerRemove kills and removes a container from the docker host.
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+ query := url.Values{}
+ if options.RemoveVolumes {
+ query.Set("v", "1")
+ }
+ if options.RemoveLinks {
+ query.Set("link", "1")
+ }
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go
new file mode 100644
index 000000000..0e718da7c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+ query := url.Values{}
+ query.Set("name", newContainerName)
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go
new file mode 100644
index 000000000..66c3cc194
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_resize.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
+ query := url.Values{}
+ query.Set("h", strconv.Itoa(int(height)))
+ query.Set("w", strconv.Itoa(int(width)))
+
+ resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go
new file mode 100644
index 000000000..74d7455f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_restart.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/docker/api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go
new file mode 100644
index 000000000..b1f08de41
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_start.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
+ query := url.Values{}
+ if len(options.CheckpointID) != 0 {
+ query.Set("checkpoint", options.CheckpointID)
+ }
+ if len(options.CheckpointDir) != 0 {
+ query.Set("checkpoint-dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go
new file mode 100644
index 000000000..4758c66e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_stats.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerStats returns near realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
+ query := url.Values{}
+ query.Set("stream", "0")
+ if stream {
+ query.Set("stream", "1")
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+ if err != nil {
+ return types.ContainerStats{}, err
+ }
+
+ osType := getDockerOS(resp.header.Get("Server"))
+ return types.ContainerStats{Body: resp.body, OSType: osType}, err
+}
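
A minimal usage sketch for ContainerStats, assuming an already-constructed *client.Client; the helper name printCPUUsage and the printed fields are illustrative, not part of the vendored code. With stream set to false the daemon returns a single snapshot, and the caller must close the Body as the doc comment requires.

    package example

    import (
        "encoding/json"
        "fmt"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // printCPUUsage fetches a one-shot stats snapshot (stream=false),
    // decodes it, and closes the body.
    func printCPUUsage(ctx context.Context, cli *client.Client, containerID string) error {
        stats, err := cli.ContainerStats(ctx, containerID, false)
        if err != nil {
            return err
        }
        defer stats.Body.Close()

        var v types.StatsJSON
        if err := json.NewDecoder(stats.Body).Decode(&v); err != nil {
            return err
        }
        fmt.Println(stats.OSType, v.CPUStats.CPUUsage.TotalUsage)
        return nil
    }
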
diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go
new file mode 100644
index 000000000..b5418ae8c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_stop.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/docker/api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerStop stops a container. The call blocks until the container
+// stops or the timeout expires.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
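
A brief sketch of the timeout semantics, under the same assumptions (stopGracefully is an illustrative name): a nil timeout defers to the daemon's default grace period, while a non-nil value is serialized into the "t" query parameter.

    package example

    import (
        "time"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // stopGracefully asks the daemon to stop the container, allowing it
    // ten seconds before it is killed. Passing nil instead would let the
    // daemon apply its default grace period.
    func stopGracefully(ctx context.Context, cli *client.Client, containerID string) error {
        grace := 10 * time.Second
        return cli.ContainerStop(ctx, containerID, &grace)
    }
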
diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go
new file mode 100644
index 000000000..9689123a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_top.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerTop shows process information from within a container.
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) {
+ var response container.ContainerTopOKBody
+ query := url.Values{}
+ if len(arguments) > 0 {
+ query.Set("ps_args", strings.Join(arguments, " "))
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go
new file mode 100644
index 000000000..5c7621125
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_unpause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerUnpause resumes the process execution within a container
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go
new file mode 100644
index 000000000..5082f22df
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_update.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerUpdate updates resources of a container
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) {
+ var response container.ContainerUpdateOKBody
+ serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go
new file mode 100644
index 000000000..854c6c053
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_wait.go
@@ -0,0 +1,84 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ContainerWait waits until the specified container is in a certain state
+// indicated by the given condition, either "not-running" (default),
+// "next-exit", or "removed".
+//
+// If this client's API version is before 1.30, condition is ignored and
+// ContainerWait will return immediately with the two channels, as the server
+// will wait as if the condition were "not-running".
+//
+// If this client's API version is at least 1.30, ContainerWait blocks until
+// the request has been acknowledged by the server (with a response header),
+// then returns two channels on which the caller can wait for the exit status
+// of the container or an error if there was a problem either beginning the
+// wait request or in getting the response. This allows the caller to
+// synchronize ContainerWait with other calls, such as specifying a
+// "next-exit" condition before issuing a ContainerStart request.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
+ if versions.LessThan(cli.ClientVersion(), "1.30") {
+ return cli.legacyContainerWait(ctx, containerID)
+ }
+
+ resultC := make(chan container.ContainerWaitOKBody)
+ errC := make(chan error, 1)
+
+ query := url.Values{}
+ query.Set("condition", string(condition))
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
+ if err != nil {
+ defer ensureReaderClosed(resp)
+ errC <- err
+ return resultC, errC
+ }
+
+ go func() {
+ defer ensureReaderClosed(resp)
+ var res container.ContainerWaitOKBody
+ if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+ errC <- err
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
+
+// legacyContainerWait returns immediately and doesn't have an option to wait
+// until the container is removed.
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) {
+ resultC := make(chan container.ContainerWaitOKBody)
+ errC := make(chan error)
+
+ go func() {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+ if err != nil {
+ errC <- err
+ return
+ }
+ defer ensureReaderClosed(resp)
+
+ var res container.ContainerWaitOKBody
+ if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+ errC <- err
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
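
A sketch of the synchronization pattern the ContainerWait doc comment describes, under the same assumptions (runAndWait is an illustrative name): register a "next-exit" wait before issuing the start, then select on the two returned channels.

    package example

    import (
        "github.com/docker/docker/api/types"
        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // runAndWait registers the wait before starting the container so the
    // "next-exit" condition cannot miss a fast exit, then blocks on the
    // result and error channels. Cancelling ctx aborts the wait request.
    func runAndWait(ctx context.Context, cli *client.Client, containerID string) (int64, error) {
        waitC, errC := cli.ContainerWait(ctx, containerID, container.WaitConditionNextExit)

        if err := cli.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
            return 0, err
        }

        select {
        case res := <-waitC:
            return res.StatusCode, nil
        case err := <-errC:
            return 0, err
        }
    }
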
diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go
new file mode 100644
index 000000000..03c80b39a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/disk_usage.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// DiskUsage requests the current data usage from the daemon
+func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) {
+ var du types.DiskUsage
+
+ serverResp, err := cli.get(ctx, "/system/df", nil, nil)
+ if err != nil {
+ return du, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil {
+ return du, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return du, nil
+}
diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go
new file mode 100644
index 000000000..aa5bc6a6c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/distribution_inspect.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ registrytypes "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// DistributionInspect returns the image digest with full Manifest
+func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) {
+ // Contact the registry to retrieve digest and platform information
+ var distributionInspect registrytypes.DistributionInspect
+
+ if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil {
+ return distributionInspect, err
+ }
+ var headers map[string][]string
+
+ if encodedRegistryAuth != "" {
+ headers = map[string][]string{
+ "X-Registry-Auth": {encodedRegistryAuth},
+ }
+ }
+
+ resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
+ if err != nil {
+ return distributionInspect, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&distributionInspect)
+ ensureReaderClosed(resp)
+ return distributionInspect, err
+}
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
new file mode 100644
index 000000000..fc7df9f1e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -0,0 +1,300 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/types/versions"
+ "github.com/pkg/errors"
+)
+
+// errConnectionFailed implements an error returned when a connection fails.
+type errConnectionFailed struct {
+ host string
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (err errConnectionFailed) Error() string {
+ if err.host == "" {
+ return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
+ }
+ return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
+}
+
+// IsErrConnectionFailed returns true if the error is caused by a failed connection.
+func IsErrConnectionFailed(err error) bool {
+ _, ok := errors.Cause(err).(errConnectionFailed)
+ return ok
+}
+
+// ErrorConnectionFailed returns an error that includes the host in its message when the connection to the docker daemon fails.
+func ErrorConnectionFailed(host string) error {
+ return errConnectionFailed{host: host}
+}
+
+type notFound interface {
+ error
+ NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is caused by an
+// object (image, container, network, volume, …) not being found in the docker host.
+func IsErrNotFound(err error) bool {
+ te, ok := err.(notFound)
+ return ok && te.NotFound()
+}
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+ imageID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e imageNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+ containerID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e containerNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+ networkID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e networkNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
+func IsErrNetworkNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// volumeNotFoundError implements an error returned when a volume is not in the docker host.
+type volumeNotFoundError struct {
+ volumeID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e volumeNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a volumeNotFoundError
+func (e volumeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+}
+
+// IsErrVolumeNotFound returns true if the error is caused
+// when a volume is not found in the docker host.
+func IsErrVolumeNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// unauthorizedError represents an authorization error in a remote registry.
+type unauthorizedError struct {
+ cause error
+}
+
+// Error returns a string representation of an unauthorizedError
+func (u unauthorizedError) Error() string {
+ return u.cause.Error()
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// when a remote registry authentication fails
+func IsErrUnauthorized(err error) bool {
+ _, ok := err.(unauthorizedError)
+ return ok
+}
+
+// nodeNotFoundError implements an error returned when a node is not found.
+type nodeNotFoundError struct {
+ nodeID string
+}
+
+// Error returns a string representation of a nodeNotFoundError
+func (e nodeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such node: %s", e.nodeID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e nodeNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrNodeNotFound returns true if the error is caused
+// when a node is not found.
+func IsErrNodeNotFound(err error) bool {
+ _, ok := err.(nodeNotFoundError)
+ return ok
+}
+
+// serviceNotFoundError implements an error returned when a service is not found.
+type serviceNotFoundError struct {
+ serviceID string
+}
+
+// Error returns a string representation of a serviceNotFoundError
+func (e serviceNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such service: %s", e.serviceID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e serviceNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrServiceNotFound returns true if the error is caused
+// when a service is not found.
+func IsErrServiceNotFound(err error) bool {
+ _, ok := err.(serviceNotFoundError)
+ return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+type taskNotFoundError struct {
+ taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError
+func (e taskNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e taskNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrTaskNotFound returns true if the error is caused
+// when a task is not found.
+func IsErrTaskNotFound(err error) bool {
+ _, ok := err.(taskNotFoundError)
+ return ok
+}
+
+type pluginPermissionDenied struct {
+ name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+ return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+ _, ok := err.(pluginPermissionDenied)
+ return ok
+}
+
+// NewVersionError returns an error if the required API version
+// is higher than the version the client is currently configured to use.
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+ if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+ return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+ }
+ return nil
+}
+
+// secretNotFoundError implements an error returned when a secret is not found.
+type secretNotFoundError struct {
+ name string
+}
+
+// Error returns a string representation of a secretNotFoundError
+func (e secretNotFoundError) Error() string {
+ return fmt.Sprintf("Error: no such secret: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e secretNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrSecretNotFound returns true if the error is caused
+// when a secret is not found.
+func IsErrSecretNotFound(err error) bool {
+ _, ok := err.(secretNotFoundError)
+ return ok
+}
+
+// configNotFoundError implements an error returned when a config is not found.
+type configNotFoundError struct {
+ name string
+}
+
+// Error returns a string representation of a configNotFoundError
+func (e configNotFoundError) Error() string {
+ return fmt.Sprintf("Error: no such config: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e configNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrConfigNotFound returns true if the error is caused
+// when a config is not found.
+func IsErrConfigNotFound(err error) bool {
+ _, ok := err.(configNotFoundError)
+ return ok
+}
+
+// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
+type pluginNotFoundError struct {
+ name string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e pluginNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a pluginNotFoundError
+func (e pluginNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such plugin: %s", e.name)
+}
+
+// IsErrPluginNotFound returns true if the error is caused
+// when a plugin is not found in the docker host.
+func IsErrPluginNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
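
A short sketch of how callers are expected to use these predicates instead of matching error strings, under the same assumptions (ensureImage is an illustrative name):

    package example

    import (
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // ensureImage reports whether the image is already present, using the
    // typed NotFound check rather than inspecting the error text.
    func ensureImage(ctx context.Context, cli *client.Client, image string) (bool, error) {
        _, _, err := cli.ImageInspectWithRaw(ctx, image)
        if err == nil {
            return true, nil
        }
        if client.IsErrImageNotFound(err) {
            return false, nil // not present; the caller may pull it
        }
        return false, err
    }
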
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
new file mode 100644
index 000000000..af47aefa7
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/events.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events from the daemon. It's up to the caller to close the stream
+// by cancelling the context. Once the stream has been completely read, an io.EOF error will
+// be sent over the error channel. If an error is sent, all processing will be stopped. It's up
+// to the caller to reopen the stream in the event of an error by reinvoking this method.
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
+
+ messages := make(chan events.Message)
+ errs := make(chan error, 1)
+
+ started := make(chan struct{})
+ go func() {
+ defer close(errs)
+
+ query, err := buildEventsQueryParams(cli.version, options)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+
+ resp, err := cli.get(ctx, "/events", query, nil)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+ defer resp.body.Close()
+
+ decoder := json.NewDecoder(resp.body)
+
+ close(started)
+ for {
+ select {
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ default:
+ var event events.Message
+ if err := decoder.Decode(&event); err != nil {
+ errs <- err
+ return
+ }
+
+ select {
+ case messages <- event:
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ }
+ }
+ }
+ }()
+ <-started
+
+ return messages, errs
+}
+
+func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) {
+ query := url.Values{}
+ ref := time.Now()
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Until != "" {
+ ts, err := timetypes.GetTimestamp(options.Until, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("until", ts)
+ }
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ return query, nil
+}
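
A sketch of the consumption loop the Events doc comment implies, under the same assumptions (watchEvents is an illustrative name): read messages until the error channel delivers io.EOF, treat any other error as fatal, and stop by cancelling the context.

    package example

    import (
        "fmt"
        "io"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // watchEvents prints daemon events until the stream ends (io.EOF),
    // another error arrives, or the caller cancels ctx.
    func watchEvents(ctx context.Context, cli *client.Client) error {
        msgs, errs := cli.Events(ctx, types.EventsOptions{})
        for {
            select {
            case m := <-msgs:
                fmt.Println(m.Type, m.Action, m.Actor.ID)
            case err := <-errs:
                if err == io.EOF {
                    return nil // stream fully read
                }
                return err
            }
        }
    }
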
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
new file mode 100644
index 000000000..8cf0119f3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -0,0 +1,208 @@
+package client
+
+import (
+ "bufio"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/pkg/tlsconfig"
+ "github.com/docker/go-connections/sockets"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// tlsClientCon holds tls information and a dialed connection.
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // The standard library's tls.Conn doesn't provide the CloseWrite() method,
+ // so we do it on its underlying connection.
+ if conn, ok := c.rawConn.(types.CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// postHijacked sends a POST request and hijacks the connection.
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+ bodyEncoded, err := encodeData(body)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ apiPath := cli.getAPIPath(path, query)
+ req, err := http.NewRequest("POST", apiPath, bodyEncoded)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+ req = cli.addHeaders(req, headers)
+
+ conn, err := cli.setupHijackConn(req, "tcp")
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+ if err != nil {
+ return nil, err
+ }
+
+ rawConn, err := proxyDialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection will prevent
+ // ECONNTIMEOUT unless the socket connection truly is broken
+ if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ config = tlsconfig.Clone(config)
+ config.ServerName = hostname
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where Docker differs from the standard crypto/tls package: we
+ // return a wrapper which holds both the TLS and raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+ if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+ // Note that this isn't the standard library's tls.Dial function
+ return tlsDial(proto, addr, tlsConfig)
+ }
+ if proto == "npipe" {
+ return sockets.DialPipe(addr, 32*time.Second)
+ }
+ return net.Dial(proto, addr)
+}
+
+func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) {
+ req.Host = cli.addr
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", proto)
+
+ conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
+ if err != nil {
+ return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+ }
+
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection will prevent
+ // ECONNTIMEOUT unless the socket connection truly is broken
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ clientconn := httputil.NewClientConn(conn, nil)
+ defer clientconn.Close()
+
+ // Server hijacks the connection, error 'connection closed' expected
+ resp, err := clientconn.Do(req)
+ if err != httputil.ErrPersistEOF {
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ resp.Body.Close()
+ return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+ }
+ }
+
+ c, br := clientconn.Hijack()
+ if br.Buffered() > 0 {
+ // If there is buffered content, wrap the connection
+ c = &hijackedConn{c, br}
+ } else {
+ br.Reset(nil)
+ }
+
+ return c, nil
+}
+
+type hijackedConn struct {
+ net.Conn
+ r *bufio.Reader
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
new file mode 100644
index 000000000..44a215f90
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_build.go
@@ -0,0 +1,128 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the
+// caller to close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+ query, err := cli.imageBuildOptionsToQuery(options)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ headers := http.Header(make(map[string][]string))
+ buf, err := json.Marshal(options.AuthConfigs)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+ headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+ headers.Set("Content-Type", "application/x-tar")
+
+ serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ osType := getDockerOS(serverResp.header.Get("Server"))
+
+ return types.ImageBuildResponse{
+ Body: serverResp.body,
+ OSType: osType,
+ }, nil
+}
+
+func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+ query := url.Values{
+ "t": options.Tags,
+ "securityopt": options.SecurityOpt,
+ "extrahosts": options.ExtraHosts,
+ }
+ if options.SuppressOutput {
+ query.Set("q", "1")
+ }
+ if options.RemoteContext != "" {
+ query.Set("remote", options.RemoteContext)
+ }
+ if options.NoCache {
+ query.Set("nocache", "1")
+ }
+ if options.Remove {
+ query.Set("rm", "1")
+ } else {
+ query.Set("rm", "0")
+ }
+
+ if options.ForceRemove {
+ query.Set("forcerm", "1")
+ }
+
+ if options.PullParent {
+ query.Set("pull", "1")
+ }
+
+ if options.Squash {
+ if err := cli.NewVersionError("1.25", "squash"); err != nil {
+ return query, err
+ }
+ query.Set("squash", "1")
+ }
+
+ if !container.Isolation.IsDefault(options.Isolation) {
+ query.Set("isolation", string(options.Isolation))
+ }
+
+ query.Set("cpusetcpus", options.CPUSetCPUs)
+ query.Set("networkmode", options.NetworkMode)
+ query.Set("cpusetmems", options.CPUSetMems)
+ query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+ query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+ query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+ query.Set("memory", strconv.FormatInt(options.Memory, 10))
+ query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+ query.Set("cgroupparent", options.CgroupParent)
+ query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+ query.Set("dockerfile", options.Dockerfile)
+ query.Set("target", options.Target)
+
+ ulimitsJSON, err := json.Marshal(options.Ulimits)
+ if err != nil {
+ return query, err
+ }
+ query.Set("ulimits", string(ulimitsJSON))
+
+ buildArgsJSON, err := json.Marshal(options.BuildArgs)
+ if err != nil {
+ return query, err
+ }
+ query.Set("buildargs", string(buildArgsJSON))
+
+ labelsJSON, err := json.Marshal(options.Labels)
+ if err != nil {
+ return query, err
+ }
+ query.Set("labels", string(labelsJSON))
+
+ cacheFromJSON, err := json.Marshal(options.CacheFrom)
+ if err != nil {
+ return query, err
+ }
+ query.Set("cachefrom", string(cacheFromJSON))
+ if options.SessionID != "" {
+ query.Set("session", options.SessionID)
+ }
+
+ return query, nil
+}
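
A sketch of a minimal build call, under the same assumptions (buildFromTar is an illustrative name; the tag is an example). The build context must already be a tar stream, matching the application/x-tar content type set above, and the caller must read and close the response Body to drive the build to completion.

    package example

    import (
        "io"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // buildFromTar submits a tarred build context and streams the JSON
    // progress messages to stdout before closing the body.
    func buildFromTar(ctx context.Context, cli *client.Client, buildContext io.Reader) error {
        resp, err := cli.ImageBuild(ctx, buildContext, types.ImageBuildOptions{
            Tags:       []string{"example/app:latest"},
            Dockerfile: "Dockerfile",
        })
        if err != nil {
            return err
        }
        defer resp.Body.Close()

        _, err = io.Copy(os.Stdout, resp.Body)
        return err
    }
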
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
new file mode 100644
index 000000000..4436abb0d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(parentReference)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go
new file mode 100644
index 000000000..7b4babcba
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/image"
+ "golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) {
+ var history []image.HistoryResponseItem
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+ if err != nil {
+ return history, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&history)
+ ensureReaderClosed(serverResp)
+ return history, err
+}
diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go
new file mode 100644
index 000000000..d7dedd823
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_import.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
+ if ref != "" {
+ // Check if the given image name can be resolved
+ if _, err := reference.ParseNormalizedNamed(ref); err != nil {
+ return nil, err
+ }
+ }
+
+ query := url.Values{}
+ query.Set("fromSrc", source.SourceName)
+ query.Set("repo", ref)
+ query.Set("tag", options.Tag)
+ query.Set("message", options.Message)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+
+ resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
new file mode 100644
index 000000000..b3a64ce2f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageInspectWithRaw returns the image information and its raw representation.
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ImageInspect{}, nil, imageNotFoundError{imageID}
+ }
+ return types.ImageInspect{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ImageInspect{}, nil, err
+ }
+
+ var response types.ImageInspect
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
new file mode 100644
index 000000000..f26464f67
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_list.go
@@ -0,0 +1,45 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+// ImageList returns a list of images in the docker host.
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) {
+ var images []types.ImageSummary
+ query := url.Values{}
+
+ optionFilters := options.Filters
+ referenceFilters := optionFilters.Get("reference")
+ if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 {
+ query.Set("filter", referenceFilters[0])
+ for _, filterValue := range referenceFilters {
+ optionFilters.Del("reference", filterValue)
+ }
+ }
+ if optionFilters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
+ if err != nil {
+ return images, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ serverResp, err := cli.get(ctx, "/images/json", query, nil)
+ if err != nil {
+ return images, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&images)
+ ensureReaderClosed(serverResp)
+ return images, err
+}
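
A sketch of the reference filter handled above, under the same assumptions (listByReference is an illustrative name). On API 1.25 and later the filter travels in the "filters" parameter; ImageList itself rewrites it to the legacy "filter" form for older daemons.

    package example

    import (
        "fmt"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // listByReference lists only images whose reference matches ref,
    // e.g. "ubuntu" or "example/app".
    func listByReference(ctx context.Context, cli *client.Client, ref string) error {
        f := filters.NewArgs()
        f.Add("reference", ref)

        images, err := cli.ImageList(ctx, types.ImageListOptions{Filters: f})
        if err != nil {
            return err
        }
        for _, img := range images {
            fmt.Println(img.ID, img.RepoTags)
        }
        return nil
    }
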
diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go
new file mode 100644
index 000000000..77aaf1af3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_load.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ImageLoad loads an image in the docker host from the client host.
+// It's up to the caller to close the io.ReadCloser in the
+// ImageLoadResponse returned by this function.
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
+ v := url.Values{}
+ v.Set("quiet", "0")
+ if quiet {
+ v.Set("quiet", "1")
+ }
+ headers := map[string][]string{"Content-Type": {"application/x-tar"}}
+ resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
+ if err != nil {
+ return types.ImageLoadResponse{}, err
+ }
+ return types.ImageLoadResponse{
+ Body: resp.body,
+ JSON: resp.header.Get("Content-Type") == "application/json",
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go
new file mode 100644
index 000000000..5ef98b7f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ImagesPrune requests the daemon to delete unused data
+func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) {
+ var report types.ImagesPruneReport
+
+ if err := cli.NewVersionError("1.25", "image prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go
new file mode 100644
index 000000000..a72b9bf7f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_pull.go
@@ -0,0 +1,61 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImagePull requests the docker host to pull an image from a remote registry.
+// If the operation is unauthorized, it executes the privileged function
+// and retries the request once.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+//
+// FIXME(vdemeester): this is currently used in a few ways in docker/docker
+// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(refStr)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ if !options.All {
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ }
+
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+// getAPITagFromNamedRef returns a tag from the specified reference.
+// This function is necessary as long as the docker "server" api expects
+// digests to be sent as tags and makes a distinction between the name
+// and tag/digest part of a reference.
+func getAPITagFromNamedRef(ref reference.Named) string {
+ if digested, ok := ref.(reference.Digested); ok {
+ return digested.Digest().String()
+ }
+ ref = reference.TagNameOnly(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ return tagged.Tag()
+ }
+ return ""
+}
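
A sketch of a plain pull, under the same assumptions (pullImage is an illustrative name). The pull only completes once the progress stream has been read to EOF, so the reader must be drained before the image can be assumed present.

    package example

    import (
        "io"
        "io/ioutil"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // pullImage pulls ref and discards the JSON progress stream; the image
    // is only guaranteed to be available once the stream hits EOF.
    func pullImage(ctx context.Context, cli *client.Client, ref string) error {
        rc, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{})
        if err != nil {
            return err
        }
        defer rc.Close()

        _, err = io.Copy(ioutil.Discard, rc)
        return err
    }
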
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
new file mode 100644
index 000000000..410d2fb91
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_push.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// If the operation is unauthorized, it executes the privileged function
+// and retries the request once.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return nil, errors.New("cannot push a digest reference")
+ }
+
+ tag := ""
+ name := reference.FamiliarName(ref)
+
+ if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged {
+ tag = nameTaggedRef.Tag()
+ }
+
+ query := url.Values{}
+ query.Set("tag", tag)
+
+ resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
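
A sketch of supplying RegistryAuth, under the same assumptions (pushWithAuth is an illustrative name). The X-Registry-Auth header used by tryImagePush carries a base64url-encoded JSON types.AuthConfig:

    package example

    import (
        "encoding/base64"
        "encoding/json"
        "io"
        "io/ioutil"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // pushWithAuth encodes the credentials the way the X-Registry-Auth
    // header expects, pushes ref, and drains the progress stream.
    func pushWithAuth(ctx context.Context, cli *client.Client, ref, user, pass string) error {
        buf, err := json.Marshal(types.AuthConfig{Username: user, Password: pass})
        if err != nil {
            return err
        }
        auth := base64.URLEncoding.EncodeToString(buf)

        rc, err := cli.ImagePush(ctx, ref, types.ImagePushOptions{RegistryAuth: auth})
        if err != nil {
            return err
        }
        defer rc.Close()

        _, err = io.Copy(ioutil.Discard, rc)
        return err
    }
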
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
new file mode 100644
index 000000000..6921209ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_remove.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageRemove removes an image from the docker host.
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) {
+ query := url.Values{}
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+ if !options.PruneChildren {
+ query.Set("noprune", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var dels []types.ImageDeleteResponseItem
+ err = json.NewDecoder(resp.body).Decode(&dels)
+ ensureReaderClosed(resp)
+ return dels, err
+}
diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go
new file mode 100644
index 000000000..ecac880a3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_save.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+// It's up to the caller to store the images and close the stream.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
+ query := url.Values{
+ "names": imageIDs,
+ }
+
+ resp, err := cli.get(ctx, "/images/get", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
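
A sketch of storing the export, under the same assumptions (saveToFile is an illustrative name): stream the tarball to disk and close both ends, as the doc comment leaves to the caller.

    package example

    import (
        "io"
        "os"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // saveToFile writes the image tarball returned by ImageSave to path.
    func saveToFile(ctx context.Context, cli *client.Client, image, path string) error {
        rc, err := cli.ImageSave(ctx, []string{image})
        if err != nil {
            return err
        }
        defer rc.Close()

        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()

        _, err = io.Copy(f, rc)
        return err
    }
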
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
new file mode 100644
index 000000000..b0fcd5c23
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_search.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// ImageSearch makes the docker host search a remote registry for images matching a term.
+// The list of results is not sorted in any fashion.
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
+ var results []registry.SearchResult
+ query := url.Values{}
+ query.Set("term", term)
+ query.Set("limit", fmt.Sprintf("%d", options.Limit))
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return results, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return results, privilegeErr
+ }
+ resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return results, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&results)
+ ensureReaderClosed(resp)
+ return results, err
+}
+
+func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.get(ctx, "/images/search", query, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go
new file mode 100644
index 000000000..8924f71eb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_tag.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// ImageTag tags an image in the docker host
+func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
+ if _, err := reference.ParseAnyReference(source); err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
+ }
+
+ ref, err := reference.ParseNormalizedNamed(target)
+ if err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target)
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return errors.New("refusing to create a tag with a digest reference")
+ }
+
+ ref = reference.TagNameOnly(ref)
+
+ query := url.Values{}
+ query.Set("repo", reference.FamiliarName(ref))
+ if tagged, ok := ref.(reference.Tagged); ok {
+ query.Set("tag", tagged.Tag())
+ }
+
+ resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go
new file mode 100644
index 000000000..ac0796122
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/info.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// Info returns information about the docker server.
+func (cli *Client) Info(ctx context.Context) (types.Info, error) {
+ var info types.Info
+ serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
+ if err != nil {
+ return info, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
+ return info, fmt.Errorf("Error reading remote info: %v", err)
+ }
+
+ return info, nil
+}
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go
new file mode 100644
index 000000000..acd4de1db
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/interface.go
@@ -0,0 +1,194 @@
+package client
+
+import (
+ "io"
+ "net"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
+// CommonAPIClient defines the methods common to the stable and experimental versions of APIClient.
+type CommonAPIClient interface {
+ ConfigAPIClient
+ ContainerAPIClient
+ DistributionAPIClient
+ ImageAPIClient
+ NodeAPIClient
+ NetworkAPIClient
+ PluginAPIClient
+ ServiceAPIClient
+ SwarmAPIClient
+ SecretAPIClient
+ SystemAPIClient
+ VolumeAPIClient
+ ClientVersion() string
+ DaemonHost() string
+ ServerVersion(ctx context.Context) (types.Version, error)
+ NegotiateAPIVersion(ctx context.Context)
+ NegotiateAPIVersionPing(types.Ping)
+ DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
+}
+
+// ContainerAPIClient defines API client methods for the containers
+type ContainerAPIClient interface {
+ ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
+ ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
+ ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error)
+ ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error)
+ ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
+ ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
+ ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
+ ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
+ ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
+ ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
+ ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
+ ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
+ ContainerKill(ctx context.Context, container, signal string) error
+ ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+ ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ ContainerPause(ctx context.Context, container string) error
+ ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
+ ContainerRename(ctx context.Context, container, newContainerName string) error
+ ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
+ ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
+ ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
+ ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
+ ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error)
+ ContainerUnpause(ctx context.Context, container string) error
+ ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error)
+ ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error)
+ CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
+ CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
+ ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
+}
+
+// DistributionAPIClient defines API client methods for the registry
+type DistributionAPIClient interface {
+ DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error)
+}
+
+// ImageAPIClient defines API client methods for the images
+type ImageAPIClient interface {
+ ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
+ BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
+ ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
+ ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
+ ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
+ ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
+ ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error)
+ ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
+ ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
+ ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
+ ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error)
+ ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
+ ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
+ ImageTag(ctx context.Context, image, ref string) error
+ ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error)
+}
+
+// NetworkAPIClient defines API client methods for the networks
+type NetworkAPIClient interface {
+ NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error
+ NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
+ NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error
+ NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error)
+ NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error)
+ NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
+ NetworkRemove(ctx context.Context, networkID string) error
+ NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error)
+}
+
+// NodeAPIClient defines API client methods for the nodes
+type NodeAPIClient interface {
+ NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
+ NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+ NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
+ NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+}
+
+// PluginAPIClient defines API client methods for the plugins
+type PluginAPIClient interface {
+ PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error)
+ PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
+ PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
+ PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
+ PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
+ PluginSet(ctx context.Context, name string, args []string) error
+ PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
+ PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error
+}
+
+// ServiceAPIClient defines API client methods for the services
+type ServiceAPIClient interface {
+ ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
+ ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
+ ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+ ServiceRemove(ctx context.Context, serviceID string) error
+ ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
+ ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+ TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+}
+
+// SwarmAPIClient defines API client methods for the swarm
+type SwarmAPIClient interface {
+ SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+ SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+ SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
+ SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
+ SwarmLeave(ctx context.Context, force bool) error
+ SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+ SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
+}
+
+// SystemAPIClient defines API client methods for the system
+type SystemAPIClient interface {
+ Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
+ Info(ctx context.Context) (types.Info, error)
+ RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error)
+ DiskUsage(ctx context.Context) (types.DiskUsage, error)
+ Ping(ctx context.Context) (types.Ping, error)
+}
+
+// VolumeAPIClient defines API client methods for the volumes
+type VolumeAPIClient interface {
+ VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error)
+ VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
+ VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
+ VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error)
+ VolumeRemove(ctx context.Context, volumeID string, force bool) error
+ VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
+}
+
+// SecretAPIClient defines API client methods for secrets
+type SecretAPIClient interface {
+ SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
+ SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
+ SecretRemove(ctx context.Context, id string) error
+ SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
+ SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
+}
+
+// ConfigAPIClient defines API client methods for configs
+type ConfigAPIClient interface {
+ ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
+ ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
+ ConfigRemove(ctx context.Context, id string) error
+ ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
+ ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
+}
diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go
new file mode 100644
index 000000000..51da98ecd
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/interface_experimental.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+type apiClientExperimental interface {
+ CheckpointAPIClient
+}
+
+// CheckpointAPIClient defines API client methods for the checkpoints
+type CheckpointAPIClient interface {
+ CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
+ CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error
+ CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
+}
diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go
new file mode 100644
index 000000000..cc90a3cbb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/interface_stable.go
@@ -0,0 +1,10 @@
+package client
+
+// APIClient is an interface that clients talking to a docker server must implement.
+type APIClient interface {
+ CommonAPIClient
+ apiClientExperimental
+}
+
+// Ensure that Client always implements APIClient.
+var _ APIClient = &Client{}
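A minimal sketch of consuming these interfaces from application code. It assumes the package's NewEnvClient constructor (which configures the client from DOCKER_HOST and related environment variables) and only exercises Ping, defined later in this diff:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        // NewEnvClient reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH
        // and DOCKER_TLS_VERIFY from the environment.
        cli, err := client.NewEnvClient()
        if err != nil {
            log.Fatal(err)
        }

        // Program against the interface rather than the concrete *client.Client.
        var api client.APIClient = cli

        ping, err := api.Ping(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("API version:", ping.APIVersion, "experimental:", ping.Experimental)
    }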
diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go
new file mode 100644
index 000000000..79219ff59
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/login.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// RegistryLogin authenticates the docker server with a given docker registry.
+// It returns unauthorizedError when the authentication fails.
+func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) {
+ resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
+
+ if resp.statusCode == http.StatusUnauthorized {
+ return registry.AuthenticateOKBody{}, unauthorizedError{err}
+ }
+ if err != nil {
+ return registry.AuthenticateOKBody{}, err
+ }
+
+ var response registry.AuthenticateOKBody
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
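A hedged usage sketch for RegistryLogin; the credentials and registry address are illustrative, and cli is assumed to be a *client.Client built as in the earlier Ping example (imports as before, plus github.com/docker/docker/api/types):

    func login(ctx context.Context, cli *client.Client) error {
        auth := types.AuthConfig{
            Username:      "someuser",     // illustrative only
            Password:      "somepassword", // illustrative only
            ServerAddress: "https://index.docker.io/v1/",
        }
        body, err := cli.RegistryLogin(ctx, auth)
        if err != nil {
            return err // unauthorizedError when authentication fails
        }
        fmt.Println(body.Status) // e.g. "Login Succeeded"
        return nil
    }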
diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go
new file mode 100644
index 000000000..c022c17b5
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_connect.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/network"
+ "golang.org/x/net/context"
+)
+
+// NetworkConnect connects a container to an existing network in the docker host.
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
+ nc := types.NetworkConnect{
+ Container: containerID,
+ EndpointConfig: config,
+ }
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go
new file mode 100644
index 000000000..4067a541f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkCreate creates a new network in the docker host.
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
+ networkCreateRequest := types.NetworkCreateRequest{
+ NetworkCreate: options,
+ Name: name,
+ }
+ var response types.NetworkCreateResponse
+ serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+ if err != nil {
+ return response, err
+ }
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go
new file mode 100644
index 000000000..24b58e3c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_disconnect.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkDisconnect disconnects a container from an existing network in the docker host.
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
+ nd := types.NetworkDisconnect{Container: containerID, Force: force}
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
new file mode 100644
index 000000000..848c9799f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_inspect.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkInspect returns the information for a specific network configured in the docker host.
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
+ networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
+ return networkResource, err
+}
+
+// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
+ var (
+ networkResource types.NetworkResource
+ resp serverResponse
+ err error
+ )
+ query := url.Values{}
+ if options.Verbose {
+ query.Set("verbose", "true")
+ }
+ if options.Scope != "" {
+ query.Set("scope", options.Scope)
+ }
+ resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return networkResource, nil, networkNotFoundError{networkID}
+ }
+ return networkResource, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return networkResource, nil, err
+ }
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&networkResource)
+ return networkResource, body, err
+}
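A short sketch of calling NetworkInspectWithRaw; "bridge" is the default network name on a stock daemon, and Verbose asks newer daemons to include extra detail in the response (cli as in the earlier examples):

    func inspectBridge(ctx context.Context, cli *client.Client) error {
        opts := types.NetworkInspectOptions{Verbose: true}
        resource, raw, err := cli.NetworkInspectWithRaw(ctx, "bridge", opts)
        if err != nil {
            return err
        }
        fmt.Printf("%s (driver %s), %d bytes of raw JSON\n",
            resource.Name, resource.Driver, len(raw))
        return nil
    }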
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
new file mode 100644
index 000000000..e566a93e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_list.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// NetworkList returns the list of networks configured in the docker host.
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
+ query := url.Values{}
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+ var networkResources []types.NetworkResource
+ resp, err := cli.get(ctx, "/networks", query, nil)
+ if err != nil {
+ return networkResources, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&networkResources)
+ ensureReaderClosed(resp)
+ return networkResources, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go
new file mode 100644
index 000000000..7352a7f0c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// NetworksPrune requests the daemon to delete unused networks
+func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) {
+ var report types.NetworksPruneReport
+
+ if err := cli.NewVersionError("1.25", "network prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving network prune report: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
new file mode 100644
index 000000000..6bd674892
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NetworkRemove removes an existing network from the docker host.
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
+ resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
new file mode 100644
index 000000000..abf505d29
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeInspectWithRaw returns the node information.
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
+ serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Node{}, nil, nodeNotFoundError{nodeID}
+ }
+ return swarm.Node{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Node{}, nil, err
+ }
+
+ var response swarm.Node
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
new file mode 100644
index 000000000..3e8440f08
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_list.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeList returns the list of nodes.
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/nodes", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var nodes []swarm.Node
+ err = json.NewDecoder(resp.body).Decode(&nodes)
+ ensureReaderClosed(resp)
+ return nodes, err
+}
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
new file mode 100644
index 000000000..0a77f3d57
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_remove.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+
+ "golang.org/x/net/context"
+)
+
+// NodeRemove removes a Node.
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go
new file mode 100644
index 000000000..3ca976028
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeUpdate updates a Node.
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+ ensureReaderClosed(resp)
+ return err
+}
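NodeUpdate, like the other swarm update calls in this client, uses the version query parameter for optimistic concurrency: the daemon rejects an update whose version no longer matches the stored object. A sketch of the read-modify-write cycle, with the role change purely illustrative:

    func demoteToWorker(ctx context.Context, cli *client.Client, nodeID string) error {
        // Fetch the current node to learn its version and spec.
        node, _, err := cli.NodeInspectWithRaw(ctx, nodeID)
        if err != nil {
            return err
        }
        spec := node.Spec
        spec.Role = swarm.NodeRoleWorker // illustrative modification
        // The update succeeds only if node.Version is still current.
        return cli.NodeUpdate(ctx, nodeID, node.Version, spec)
    }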
diff --git a/vendor/github.com/docker/docker/client/parse_logs.go b/vendor/github.com/docker/docker/client/parse_logs.go
new file mode 100644
index 000000000..e427f80a7
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/parse_logs.go
@@ -0,0 +1,41 @@
+package client
+
+// parse_logs.go contains utility helpers for getting information out of docker
+// log lines. Really, it only contains ParseLogDetails right now. Maybe in the
+// future there will be some desire to parse log messages back into a struct;
+// that would go here if we did.
+
+import (
+ "net/url"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// ParseLogDetails takes a details string of key value pairs in the form
+// "k=v,l=w", where the keys and values are url query escaped and each pair
+// is separated by a comma, and returns a map. It returns an error if the
+// details string is not in a valid format.
+// The exact form of the details encoding is implemented in
+// api/server/httputils/write_log_stream.go.
+func ParseLogDetails(details string) (map[string]string, error) {
+ pairs := strings.Split(details, ",")
+ detailsMap := make(map[string]string, len(pairs))
+ for _, pair := range pairs {
+ p := strings.SplitN(pair, "=", 2)
+ // if there is no equals sign, we will only get 1 part back
+ if len(p) != 2 {
+ return nil, errors.New("invalid details format")
+ }
+ k, err := url.QueryUnescape(p[0])
+ if err != nil {
+ return nil, err
+ }
+ v, err := url.QueryUnescape(p[1])
+ if err != nil {
+ return nil, err
+ }
+ detailsMap[k] = v
+ }
+ return detailsMap, nil
+}
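For example, a details string round-trips like this (the key and value contents are illustrative):

    // "k=v" pairs, comma separated, with URL query escaping on both sides.
    details, err := client.ParseLogDetails("com.example.tag=web%201,env=prod")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(details["com.example.tag"]) // "web 1"
    fmt.Println(details["env"])             // "prod"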
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
new file mode 100644
index 000000000..a4c2e2c4d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// Ping pings the server and returns the values of the "Docker-Experimental", "OSType" and "API-Version" headers.
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
+ var ping types.Ping
+ req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil)
+ if err != nil {
+ return ping, err
+ }
+ serverResp, err := cli.doRequest(ctx, req)
+ if err != nil {
+ return ping, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if serverResp.header != nil {
+ ping.APIVersion = serverResp.header.Get("API-Version")
+
+ if serverResp.header.Get("Docker-Experimental") == "true" {
+ ping.Experimental = true
+ }
+ ping.OSType = serverResp.header.Get("OSType")
+ }
+
+ err = cli.checkResponseErr(serverResp)
+ return ping, err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go
new file mode 100644
index 000000000..27954aa57
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_create.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginCreate creates a plugin
+func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
+ headers := http.Header(make(map[string][]string))
+ headers.Set("Content-Type", "application/x-tar")
+
+ query := url.Values{}
+ query.Set("name", createOptions.RepoName)
+
+ resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
+ if err != nil {
+ return err
+ }
+ ensureReaderClosed(resp)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go
new file mode 100644
index 000000000..30467db74
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_disable.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginDisable disables a plugin
+func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go
new file mode 100644
index 000000000..95517c4b8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_enable.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginEnable enables a plugin
+func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
+ query := url.Values{}
+ query.Set("timeout", strconv.Itoa(options.Timeout))
+
+ resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
new file mode 100644
index 000000000..89f39ee2c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_inspect.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginInspectWithRaw inspects an existing plugin
+func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
+ resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return nil, nil, pluginNotFoundError{name}
+ }
+ return nil, nil, err
+ }
+
+ defer ensureReaderClosed(resp)
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return nil, nil, err
+ }
+ var p types.Plugin
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&p)
+ return &p, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go
new file mode 100644
index 000000000..ce3e0506e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_install.go
@@ -0,0 +1,113 @@
+package client
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// PluginInstall installs a plugin
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+	// Set the name for the plugin pull; if empty, the daemon defaults it to the remote reference.
+ query.Set("name", name)
+
+ resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+
+ name = resp.header.Get("Docker-Plugin-Name")
+
+ pr, pw := io.Pipe()
+ go func() { // todo: the client should probably be designed more around the actual api
+ _, err := io.Copy(pw, resp.body)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer func() {
+ if err != nil {
+ delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
+ ensureReaderClosed(delResp)
+ }
+ }()
+ if len(options.Args) > 0 {
+ if err := cli.PluginSet(ctx, name, options.Args); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+
+ if options.Disabled {
+ pw.Close()
+ return
+ }
+
+ enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
+ pw.CloseWithError(enableErr)
+ }()
+ return pr, nil
+}
+
+func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.get(ctx, "/plugins/privileges", query, headers)
+}
+
+func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/plugins/pull", query, privileges, headers)
+}
+
+func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
+ resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ // todo: do inspect before to check existing name before checking privileges
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ ensureReaderClosed(resp)
+ return nil, privilegeErr
+ }
+ options.RegistryAuth = newAuthHeader
+ resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ }
+ if err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+
+ var privileges types.PluginPrivileges
+ if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+ ensureReaderClosed(resp)
+
+ if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
+ accept, err := options.AcceptPermissionsFunc(privileges)
+ if err != nil {
+ return nil, err
+ }
+ if !accept {
+ return nil, pluginPermissionDenied{options.RemoteRef}
+ }
+ }
+ return privileges, nil
+}
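A sketch of driving PluginInstall with the permission callback; the plugin reference is illustrative, and passing an empty name lets the daemon default it to the remote reference, as noted in the code above:

    func installPlugin(ctx context.Context, cli *client.Client) error {
        opts := types.PluginInstallOptions{
            RemoteRef: "vieux/sshfs:latest", // illustrative plugin reference
            AcceptPermissionsFunc: func(privileges types.PluginPrivileges) (bool, error) {
                // A real client would prompt the user here.
                fmt.Printf("plugin requests %d privilege(s); accepting\n", len(privileges))
                return true, nil
            },
        }
        rc, err := cli.PluginInstall(ctx, "", opts)
        if err != nil {
            return err
        }
        defer rc.Close()
        // Drain the pull progress; install and enable happen as the stream is consumed.
        _, err = io.Copy(ioutil.Discard, rc)
        return err
    }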
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
new file mode 100644
index 000000000..3acde3b96
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// PluginList returns the installed plugins
+func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {
+ var plugins types.PluginsListResponse
+ query := url.Values{}
+
+ if filter.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
+ if err != nil {
+ return plugins, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/plugins", query, nil)
+ if err != nil {
+ return plugins, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&plugins)
+ ensureReaderClosed(resp)
+ return plugins, err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go
new file mode 100644
index 000000000..1e5f96325
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_push.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+// PluginPush pushes a plugin to a registry
+func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
new file mode 100644
index 000000000..b017e4d34
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginRemove removes a plugin
+func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go
new file mode 100644
index 000000000..3260d2a90
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_set.go
@@ -0,0 +1,12 @@
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginSet modifies settings for an existing plugin
+func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
+ resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go
new file mode 100644
index 000000000..049ebfa2a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go
@@ -0,0 +1,39 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// PluginUpgrade upgrades a plugin
+func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+ if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
new file mode 100644
index 000000000..3e7d43fea
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -0,0 +1,262 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+// serverResponse is a wrapper for http API responses.
+type serverResponse struct {
+ body io.ReadCloser
+ header http.Header
+ statusCode int
+ reqURL *url.URL
+}
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific Go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "GET", path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific Go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+// put sends an http request to the docker API using the method PUT.
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// putRaw sends an http request to the docker API using the method PUT.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
+}
+
+type headers map[string][]string
+
+func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) {
+ if obj == nil {
+ return nil, headers, nil
+ }
+
+ body, err := encodeData(obj)
+ if err != nil {
+ return nil, headers, err
+ }
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+ headers["Content-Type"] = []string{"application/json"}
+ return body, headers, nil
+}
+
+func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) {
+ expectedPayload := (method == "POST" || method == "PUT")
+ if expectedPayload && body == nil {
+ body = bytes.NewReader([]byte{})
+ }
+
+ req, err := http.NewRequest(method, path, body)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, headers)
+
+ if cli.proto == "unix" || cli.proto == "npipe" {
+ // For local communications, it doesn't matter what the host is. We just
+ // need a valid and meaningful host name. (See #189)
+ req.Host = "docker"
+ }
+
+ req.URL.Host = cli.addr
+ req.URL.Scheme = cli.scheme
+
+ if expectedPayload && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "text/plain")
+ }
+ return req, nil
+}
+
+func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {
+ req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ resp, err := cli.doRequest(ctx, req)
+ if err != nil {
+ return resp, err
+ }
+ if err := cli.checkResponseErr(resp); err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {
+ serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
+
+ resp, err := ctxhttp.Do(ctx, cli.client, req)
+ if err != nil {
+ if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
+ return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
+ }
+
+ if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
+ return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
+ }
+
+ // Don't decorate context sentinel errors; users may be comparing to
+ // them directly.
+ switch err {
+ case context.Canceled, context.DeadlineExceeded:
+ return serverResp, err
+ }
+
+ if nErr, ok := err.(*url.Error); ok {
+ if nErr, ok := nErr.Err.(*net.OpError); ok {
+ if os.IsPermission(nErr.Err) {
+ return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
+ }
+ }
+ }
+
+ if err, ok := err.(net.Error); ok {
+ if err.Timeout() {
+ return serverResp, ErrorConnectionFailed(cli.host)
+ }
+ if !err.Temporary() {
+ if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
+ return serverResp, ErrorConnectionFailed(cli.host)
+ }
+ }
+ }
+
+ // Although there's not a strongly typed error for this in go-winio,
+ // lots of people are using the default configuration for the docker
+ // daemon on Windows where the daemon is listening on a named pipe
+	// `//./pipe/docker_engine`, and the client must be running elevated.
+	// Give users a clue rather than the not overly useful message
+ // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
+ // open //./pipe/docker_engine: The system cannot find the file specified.`.
+ // Note we can't string compare "The system cannot find the file specified" as
+ // this is localised - for example in French the error would be
+ // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+ if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+ err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.")
+ }
+
+ return serverResp, errors.Wrap(err, "error during connect")
+ }
+
+ if resp != nil {
+ serverResp.statusCode = resp.StatusCode
+ serverResp.body = resp.Body
+ serverResp.header = resp.Header
+ }
+ return serverResp, nil
+}
+
+func (cli *Client) checkResponseErr(serverResp serverResponse) error {
+ if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
+ return nil
+ }
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return err
+ }
+ if len(body) == 0 {
+ return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+ }
+
+ var ct string
+ if serverResp.header != nil {
+ ct = serverResp.header.Get("Content-Type")
+ }
+
+ var errorMessage string
+ if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
+ var errorResponse types.ErrorResponse
+ if err := json.Unmarshal(body, &errorResponse); err != nil {
+ return fmt.Errorf("Error reading JSON: %v", err)
+ }
+ errorMessage = errorResponse.Message
+ } else {
+ errorMessage = string(body)
+ }
+
+ return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage))
+}
+
+func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers,
+	// so the user cannot override the headers the client itself sets.
+ for k, v := range cli.customHTTPHeaders {
+ if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
+ continue
+ }
+ req.Header.Set(k, v)
+ }
+
+ if headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+ return req
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if err := json.NewEncoder(params).Encode(data); err != nil {
+ return nil, err
+ }
+ }
+ return params, nil
+}
+
+func ensureReaderClosed(response serverResponse) {
+ if response.body != nil {
+ // Drain up to 512 bytes and close the body to let the Transport reuse the connection
+ io.CopyN(ioutil.Discard, response.body, 512)
+ response.body.Close()
+ }
+}
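The bounded drain in ensureReaderClosed is the standard Go idiom for keep-alive reuse: if a response body is closed with unread data, the Transport discards the connection instead of returning it to its pool. The same pattern in isolation, against plain net/http (URL illustrative):

    resp, err := http.Get("http://example.com/")
    if err != nil {
        log.Fatal(err)
    }
    // Drain at most 512 bytes, then close, so the keep-alive
    // connection can go back into the Transport's pool.
    io.CopyN(ioutil.Discard, resp.Body, 512)
    resp.Body.Close()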
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
new file mode 100644
index 000000000..4354afea6
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretCreate creates a new Secret.
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
+ var response types.SecretCreateResponse
+ if err := cli.NewVersionError("1.25", "secret create"); err != nil {
+ return response, err
+ }
+ resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
new file mode 100644
index 000000000..9b602972b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretInspectWithRaw returns the secret information with raw data
+func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
+ if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
+ return swarm.Secret{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return swarm.Secret{}, nil, secretNotFoundError{id}
+ }
+ return swarm.Secret{}, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return swarm.Secret{}, nil, err
+ }
+
+ var secret swarm.Secret
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&secret)
+
+ return secret, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
new file mode 100644
index 000000000..0d33ecfbc
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretList returns the list of secrets.
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
+ if err := cli.NewVersionError("1.25", "secret list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/secrets", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var secrets []swarm.Secret
+ err = json.NewDecoder(resp.body).Decode(&secrets)
+ ensureReaderClosed(resp)
+ return secrets, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
new file mode 100644
index 000000000..c5e37af17
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_remove.go
@@ -0,0 +1,13 @@
+package client
+
+import "golang.org/x/net/context"
+
+// SecretRemove removes a Secret.
+func (cli *Client) SecretRemove(ctx context.Context, id string) error {
+ if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go
new file mode 100644
index 000000000..875a4c901
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_update.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretUpdate attempts to update a Secret
+func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
+ if err := cli.NewVersionError("1.25", "secret update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
new file mode 100644
index 000000000..a36839443
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -0,0 +1,156 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// ServiceCreate creates a new Service.
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
+ var distErr error
+
+ headers := map[string][]string{
+ "version": {cli.version},
+ }
+
+ if options.EncodedRegistryAuth != "" {
+ headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
+ }
+
+ // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
+ if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
+ service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
+ }
+
+ if err := validateServiceSpec(service); err != nil {
+ return types.ServiceCreateResponse{}, err
+ }
+
+ // ensure that the image is tagged
+ var imgPlatforms []swarm.Platform
+ if service.TaskTemplate.ContainerSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.ContainerSpec.Image = img
+ }
+ }
+ }
+
+ // ensure that the image is tagged
+ if service.TaskTemplate.PluginSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.PluginSpec.Remote = img
+ }
+ }
+ }
+
+ if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement = &swarm.Placement{}
+ }
+ if len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement.Platforms = imgPlatforms
+ }
+
+ var response types.ServiceCreateResponse
+ resp, err := cli.post(ctx, "/services/create", nil, service, headers)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+
+ if distErr != nil {
+ response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ }
+
+ ensureReaderClosed(resp)
+ return response, err
+}
+
+func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) {
+ distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
+ imageWithDigest := image
+ var platforms []swarm.Platform
+ if err != nil {
+ return "", nil, err
+ }
+
+ imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+
+ if len(distributionInspect.Platforms) > 0 {
+ platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
+ for _, p := range distributionInspect.Platforms {
+ platforms = append(platforms, swarm.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ })
+ }
+ }
+ return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// an empty string if there are no updates.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+			// the reference has no digest yet; append the resolved one
+ img, err := reference.WithDigest(namedRef, dgst)
+ if err == nil {
+ return reference.FamiliarString(img)
+ }
+ }
+ }
+ return ""
+}
+
+// imageWithTagString takes an image string and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if a canonical reference was provided.
+func imageWithTagString(image string) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ return reference.FamiliarString(reference.TagNameOnly(namedRef))
+ }
+ return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future.
+func digestWarning(image string) string {
+ return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+ if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+ return errors.New("must not specify both a container spec and a plugin spec in the task template")
+ }
+ if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+ return errors.New("mismatched runtime with plugin spec")
+ }
+ if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+ return errors.New("mismatched runtime with container spec")
+ }
+ return nil
+}
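The tagging helpers above lean on the reference package; the behavior of the unexported imageWithTagString can be reproduced directly with the same ParseNormalizedNamed/TagNameOnly/FamiliarString calls (a sketch):

    named, err := reference.ParseNormalizedNamed("redis")
    if err != nil {
        log.Fatal(err)
    }
    // TagNameOnly adds ":latest" when no tag or digest is present;
    // FamiliarString renders the short docker-style form.
    fmt.Println(reference.FamiliarString(reference.TagNameOnly(named))) // "redis:latest"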
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
new file mode 100644
index 000000000..d7e051e3a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+ query := url.Values{}
+ query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
+ serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Service{}, nil, serviceNotFoundError{serviceID}
+ }
+ return swarm.Service{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Service{}, nil, err
+ }
+
+ var response swarm.Service
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
new file mode 100644
index 000000000..c29e6d407
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceList returns the list of services.
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/services", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var services []swarm.Service
+ err = json.NewDecoder(resp.body).Decode(&services)
+ ensureReaderClosed(resp)
+ return services, err
+}
diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go
new file mode 100644
index 000000000..24384e3ec
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ServiceLogs returns the logs generated by a service in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
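A sketch of consuming ServiceLogs; the options mirror the query parameters set above, and the caller owns (and must close) the returned stream:

    func streamServiceLogs(ctx context.Context, cli *client.Client, serviceID string) error {
        opts := types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Follow:     true,
            Tail:       "100", // last 100 lines, then follow
        }
        rc, err := cli.ServiceLogs(ctx, serviceID, opts)
        if err != nil {
            return err
        }
        defer rc.Close() // the stream is the caller's responsibility
        _, err = io.Copy(os.Stdout, rc)
        return err
    }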
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
new file mode 100644
index 000000000..a9331f92c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ServiceRemove kills and removes a service.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
+ resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
new file mode 100644
index 000000000..8764f299a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_update.go
@@ -0,0 +1,92 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceUpdate updates a Service.
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
+ var (
+ query = url.Values{}
+ distErr error
+ )
+
+ headers := map[string][]string{
+ "version": {cli.version},
+ }
+
+ if options.EncodedRegistryAuth != "" {
+ headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
+ }
+
+ if options.RegistryAuthFrom != "" {
+ query.Set("registryAuthFrom", options.RegistryAuthFrom)
+ }
+
+ if options.Rollback != "" {
+ query.Set("rollback", options.Rollback)
+ }
+
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+
+ if err := validateServiceSpec(service); err != nil {
+ return types.ServiceUpdateResponse{}, err
+ }
+
+ var imgPlatforms []swarm.Platform
+ // ensure that the image is tagged
+ if service.TaskTemplate.ContainerSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.ContainerSpec.Image = img
+ }
+ }
+ }
+
+ // ensure that the plugin image is tagged
+ if service.TaskTemplate.PluginSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.PluginSpec.Remote = img
+ }
+ }
+ }
+
+ if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement = &swarm.Placement{}
+ }
+ if len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement.Platforms = imgPlatforms
+ }
+
+ var response types.ServiceUpdateResponse
+ resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+
+ if distErr != nil {
+ response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ }
+
+ ensureReaderClosed(resp)
+ return response, err
+}
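
The version parameter gives optimistic concurrency: the daemon rejects an update whose index is stale, so callers inspect first and pass the fresh version back. A read-modify-write sketch assuming NewEnvClient; the service name and the ForceUpdate bump (a swarm.TaskSpec counter that triggers a rolling restart) are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	svc, _, err := cli.ServiceInspectWithRaw(ctx, "web", types.ServiceInspectOptions{})
	if err != nil {
		panic(err)
	}
	spec := svc.Spec
	spec.TaskTemplate.ForceUpdate++ // restart tasks without changing the spec
	resp, err := cli.ServiceUpdate(ctx, svc.ID, svc.Version, spec, types.ServiceUpdateOptions{})
	if err != nil {
		panic(err)
	}
	for _, w := range resp.Warnings {
		fmt.Println("warning:", w)
	}
}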
diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go
new file mode 100644
index 000000000..8ee916213
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/session.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net"
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// DialSession returns a connection that can be used for communication with the daemon.
+func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
+ req, err := http.NewRequest("POST", "/session", nil)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, meta)
+
+ return cli.setupHijackConn(req, proto)
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
new file mode 100644
index 000000000..be28d3262
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// SwarmGetUnlockKey retrieves the swarm's unlock key.
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
+ serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
+ if err != nil {
+ return types.SwarmUnlockKeyResponse{}, err
+ }
+
+ var response types.SwarmUnlockKeyResponse
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go
new file mode 100644
index 000000000..9e65e1cca
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_init.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInit initializes the swarm.
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
+ serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
+ if err != nil {
+ return "", err
+ }
+
+ var response string
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
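
An initialization sketch; both addresses are placeholders (the advertise address uses the documentation range), and the returned string is the local node ID:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	nodeID, err := cli.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377",    // placeholder listen address
		AdvertiseAddr: "192.0.2.10:2377", // placeholder advertise address
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("initialized swarm, node ID:", nodeID)
}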
diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go
new file mode 100644
index 000000000..77e72f846
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_inspect.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInspect inspects the swarm.
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
+ serverResp, err := cli.get(ctx, "/swarm", nil, nil)
+ if err != nil {
+ return swarm.Swarm{}, err
+ }
+
+ var response swarm.Swarm
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go
new file mode 100644
index 000000000..19e5192b9
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_join.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmJoin joins the swarm.
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+ resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go
new file mode 100644
index 000000000..3a205cf3b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_leave.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// SwarmLeave leaves the swarm.
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+ query := url.Values{}
+ if force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go
new file mode 100644
index 000000000..9ee441fed
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_unlock.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmUnlock unlocks a locked swarm.
+func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
+ serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
+ ensureReaderClosed(serverResp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go
new file mode 100644
index 000000000..7245fd4e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_update.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "fmt"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmUpdate updates the swarm.
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
+ query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
+ query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
+ resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+ ensureReaderClosed(resp)
+ return err
+}
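
The same inspect-then-update handshake as services applies cluster-wide. A sketch that rotates the worker join token while leaving the spec untouched, assuming NewEnvClient:

package main

import (
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		panic(err)
	}
	// Pass the freshly inspected version and spec back unchanged; only the
	// worker token rotates.
	if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
		RotateWorkerToken: true,
	}); err != nil {
		panic(err)
	}
}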
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
new file mode 100644
index 000000000..bc8058fc3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+
+ "golang.org/x/net/context"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+ serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Task{}, nil, taskNotFoundError{taskID}
+ }
+ return swarm.Task{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ var response swarm.Task
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
new file mode 100644
index 000000000..66324da95
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// TaskList returns the list of tasks.
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/tasks", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var tasks []swarm.Task
+ err = json.NewDecoder(resp.body).Decode(&tasks)
+ ensureReaderClosed(resp)
+ return tasks, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go
new file mode 100644
index 000000000..2ed19543a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// TaskLogs returns the logs generated by a task in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go
new file mode 100644
index 000000000..401ab15d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/transport.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "crypto/tls"
+ "net/http"
+)
+
+// transportFunc allows us to inject a mock transport for testing. We define it
+// here so we can detect the tlsconfig and return nil for only this type.
+type transportFunc func(*http.Request) (*http.Response, error)
+
+func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+ return tf(req)
+}
+
+// resolveTLSConfig attempts to resolve the TLS configuration from the
+// RoundTripper.
+func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
+ switch tr := transport.(type) {
+ case *http.Transport:
+ return tr.TLSClientConfig
+ default:
+ return nil
+ }
+}
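
Since transportFunc satisfies http.RoundTripper, a closure can stand in for the whole HTTP layer in tests. An in-package sketch; newMockHTTPClient and okRoundTrip are hypothetical names, not part of the vendored code:

package client

import (
	"bytes"
	"io/ioutil"
	"net/http"
)

// newMockHTTPClient wraps a handler closure in transportFunc so requests
// never touch the network.
func newMockHTTPClient(handler func(*http.Request) (*http.Response, error)) *http.Client {
	return &http.Client{Transport: transportFunc(handler)}
}

// okRoundTrip fakes an empty JSON 200 response for any request.
func okRoundTrip(req *http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}, nil
}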
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
new file mode 100644
index 000000000..f3d8877df
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/utils.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "net/url"
+ "regexp"
+
+ "github.com/docker/docker/api/types/filters"
+)
+
+var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
+
+// getDockerOS returns the operating system based on the server header from the daemon.
+func getDockerOS(serverHeader string) string {
+ var osType string
+ matches := headerRegexp.FindStringSubmatch(serverHeader)
+ if len(matches) > 0 {
+ osType = matches[1]
+ }
+ return osType
+}
+
+// getFiltersQuery returns a url query with "filters" query term, based on the
+// filters provided.
+func getFiltersQuery(f filters.Args) (url.Values, error) {
+ query := url.Values{}
+ if f.Len() > 0 {
+ filterJSON, err := filters.ToParam(f)
+ if err != nil {
+ return query, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ return query, nil
+}
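
getFiltersQuery is unexported, but its output is easy to see through the filters package it delegates to. A sketch with an arbitrary label filter:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	f := filters.NewArgs()
	f.Add("label", "env=prod") // arbitrary example filter
	filterJSON, err := filters.ToParam(f) // same encoder getFiltersQuery uses
	if err != nil {
		panic(err)
	}
	// Produces JSON along the lines of {"label":{"env=prod":true}}, which
	// then lands in the "filters" query term.
	fmt.Println(filterJSON)
}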
diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go
new file mode 100644
index 000000000..933ceb4a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/version.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ServerVersion returns version information about the docker server host.
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
+ resp, err := cli.get(ctx, "/version", nil, nil)
+ if err != nil {
+ return types.Version{}, err
+ }
+
+ var server types.Version
+ err = json.NewDecoder(resp.body).Decode(&server)
+ ensureReaderClosed(resp)
+ return server, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go
new file mode 100644
index 000000000..9620c87cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_create.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
+// VolumeCreate creates a volume in the docker host.
+func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) {
+ var volume types.Volume
+ resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
+ if err != nil {
+ return volume, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&volume)
+ ensureReaderClosed(resp)
+ return volume, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
new file mode 100644
index 000000000..3860e9b22
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// VolumeInspect returns the information about a specific volume in the docker host.
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
+ volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
+ return volume, err
+}
+
+// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
+func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
+ var volume types.Volume
+ resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return volume, nil, volumeNotFoundError{volumeID}
+ }
+ return volume, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return volume, nil, err
+ }
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&volume)
+ return volume, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
new file mode 100644
index 000000000..32247ce11
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/filters"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
+// VolumeList returns the volumes configured in the docker host.
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) {
+ var volumes volumetypes.VolumesListOKBody
+ query := url.Values{}
+
+ if filter.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
+ if err != nil {
+ return volumes, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/volumes", query, nil)
+ if err != nil {
+ return volumes, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&volumes)
+ ensureReaderClosed(resp)
+ return volumes, err
+}
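
A listing sketch that uses the filter parameter to select dangling volumes, assuming NewEnvClient:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	f := filters.NewArgs()
	f.Add("dangling", "true") // only volumes not referenced by any container
	vols, err := cli.VolumeList(context.Background(), f)
	if err != nil {
		panic(err)
	}
	for _, v := range vols.Volumes {
		fmt.Println(v.Name, v.Driver)
	}
}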
diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go
new file mode 100644
index 000000000..2e7fea774
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// VolumesPrune requests the daemon to delete unused data
+func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
+ var report types.VolumesPruneReport
+
+ if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving volume prune report: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
new file mode 100644
index 000000000..6c26575b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_remove.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+// VolumeRemove removes a volume from the docker host.
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
+ query := url.Values{}
+ if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
+ if force {
+ query.Set("force", "1")
+ }
+ }
+ resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md
new file mode 100644
index 000000000..c4b78a8ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/README.md
@@ -0,0 +1,11 @@
+pkg/ is a collection of utility packages used by the Docker project without being specific to its internals.
+
+Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible.
+If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the
+Docker organization, to facilitate re-use by other projects. However, that is not the priority.
+
+The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core
+Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!
+
+Because utility packages are small and neatly separated from the rest of the codebase, they are a good
+place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
new file mode 100644
index 000000000..012fe52a2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
@@ -0,0 +1,23 @@
+// +build linux
+
+package homedir
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/idtools"
+)
+
+// GetStatic returns the home directory for the current user without calling
+// os/user.Current(). This is useful for statically-linked binaries on glibc-based
+// systems, because a call to os/user.Current() in a static binary leads to a
+// segfault due to a glibc issue that won't be fixed in the short term.
+// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
+func GetStatic() (string, error) {
+ uid := os.Getuid()
+ usr, err := idtools.LookupUID(uid)
+ if err != nil {
+ return "", err
+ }
+ return usr.Home, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
new file mode 100644
index 000000000..6b96b856f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package homedir
+
+import (
+ "errors"
+)
+
+// GetStatic is not needed for non-linux systems.
+// (Precisely, it is needed only for glibc-based linux systems.)
+func GetStatic() (string, error) {
+ return "", errors.New("homedir.GetStatic() is not supported on this system")
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
new file mode 100644
index 000000000..f2a20ea8f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package homedir
+
+import (
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+// Key returns the name of the environment variable that holds the user's
+// home directory on the current platform.
+func Key() string {
+ return "HOME"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ home := os.Getenv(Key())
+ if home == "" {
+ if u, err := user.CurrentUser(); err == nil {
+ return u.Home
+ }
+ }
+ return home
+}
+
+// GetShortcutString returns the string that the native shell of the platform
+// uses as a shortcut to the user's home directory.
+func GetShortcutString() string {
+ return "~"
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
new file mode 100644
index 000000000..fafdb2bbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
@@ -0,0 +1,24 @@
+package homedir
+
+import (
+ "os"
+)
+
+// Key returns the name of the environment variable that holds the user's
+// home directory on the current platform.
+func Key() string {
+ return "USERPROFILE"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ return os.Getenv(Key())
+}
+
+// GetShortcutString returns the string that the native shell of the platform
+// uses as a shortcut to the user's home directory.
+func GetShortcutString() string {
+ return "%USERPROFILE%" // be careful while using in format functions
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 000000000..68a072db2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,279 @@
+package idtools
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+type subIDRange struct {
+ Start int
+ Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int { return len(e) }
+func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any missing parents along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+// Deprecated: Use MkdirAllAndChown
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+// Deprecated: Use MkdirAndChown with an IDPair
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// MkdirAllAndChown creates a directory (including any missing parents along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
+}
+
+// MkdirAllAndChownNew creates a directory (including any missing parents along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership will be performed
+func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ uid, err := toHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ return uid, gid, nil
+}
+
+// toContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IDPair is a UID and GID pair
+type IDPair struct {
+ UID int
+ GID int
+}
+
+// IDMappings contains the mappings of UIDs and GIDs
+type IDMappings struct {
+ uids []IDMap
+ gids []IDMap
+}
+
+// NewIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIDMappings(username, groupname string) (*IDMappings, error) {
+ subuidRanges, err := parseSubuid(username)
+ if err != nil {
+ return nil, err
+ }
+ subgidRanges, err := parseSubgid(groupname)
+ if err != nil {
+ return nil, err
+ }
+ if len(subuidRanges) == 0 {
+ return nil, fmt.Errorf("No subuid ranges found for user %q", username)
+ }
+ if len(subgidRanges) == 0 {
+ return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+ }
+
+ return &IDMappings{
+ uids: createIDMap(subuidRanges),
+ gids: createIDMap(subgidRanges),
+ }, nil
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
+ return &IDMappings{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IDMappings) RootPair() IDPair {
+ uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+ return IDPair{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+ var err error
+ target := i.RootPair()
+
+ if pair.UID != target.UID {
+ target.UID, err = toHost(pair.UID, i.uids)
+ if err != nil {
+ return target, err
+ }
+ }
+
+ if pair.GID != target.GID {
+ target.GID, err = toHost(pair.GID, i.gids)
+ }
+ return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
+ uid, err := toContainer(pair.UID, i.uids)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toContainer(pair.GID, i.gids)
+ return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IDMappings) Empty() bool {
+ return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs returns the UID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) UIDs() []IDMap {
+ return i.uids
+}
+
+// GIDs returns the GID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) GIDs() []IDMap {
+ return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+ idMap := []IDMap{}
+
+ // sort the ranges by lowest ID first
+ sort.Sort(subidRanges)
+ containerID := 0
+ for _, idrange := range subidRanges {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: idrange.Start,
+ Size: idrange.Length,
+ })
+ containerID = containerID + idrange.Length
+ }
+ return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+ var rangeList ranges
+
+ subidFile, err := os.Open(path)
+ if err != nil {
+ return rangeList, err
+ }
+ defer subidFile.Close()
+
+ s := bufio.NewScanner(subidFile)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return rangeList, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" || strings.HasPrefix(text, "#") {
+ continue
+ }
+ parts := strings.Split(text, ":")
+ if len(parts) != 3 {
+ return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ }
+ if parts[0] == username || username == "ALL" {
+ startid, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ length, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ rangeList = append(rangeList, subIDRange{startid, length})
+ }
+ }
+ return rangeList, nil
+}
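
The translation in toContainer/toHost is a linear offset within whichever range contains the ID. A sketch using the exported wrappers above, with one made-up range of the shape parseSubidFile would build from an /etc/subuid line such as "someuser:100000:65536":

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Container IDs 0..65535 map onto host IDs 100000..165535.
	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	m := idtools.NewIDMappingsFromMaps(maps, maps)

	root := m.RootPair()
	fmt.Println(root.UID, root.GID) // 100000 100000: the remapped root

	host, err := m.ToHost(idtools.IDPair{UID: 1000, GID: 1000})
	if err != nil {
		panic(err)
	}
	fmt.Println(host.UID, host.GID) // 101000 101000: 100000 + offset 1000
}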
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 000000000..8701bb7fa
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,204 @@
+// +build !windows
+
+package idtools
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+var (
+ entOnce sync.Once
+ getentCmd string
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If chownExisting is false, we won't
+ // chown the full directory path if it exists
+ var paths []string
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ paths = []string{path}
+ } else if err == nil && chownExisting {
+ // short-circuit--we were called with an existing directory and chown was requested
+ return os.Chown(path, ownerUID, ownerGID)
+ } else if err == nil {
+ // nothing to do; directory path fully exists already and chown was NOT requested
+ return nil
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ return err
+ }
+ } else {
+ if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+func CanAccess(path string, pair IDPair) bool {
+ statInfo, err := system.Stat(path)
+ if err != nil {
+ return false
+ }
+ fileMode := os.FileMode(statInfo.Mode())
+ permBits := fileMode.Perm()
+ return accessible(statInfo.UID() == uint32(pair.UID),
+ statInfo.GID() == uint32(pair.GID), permBits)
+}
+
+func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
+ if isOwner && (perms&0100 == 0100) {
+ return true
+ }
+ if isGroup && (perms&0010 == 0010) {
+ return true
+ }
+ if perms&0001 == 0001 {
+ return true
+ }
+ return false
+}
+
+// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUser(username string) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUser(username)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
+ if err != nil {
+ return user.User{}, err
+ }
+ return usr, nil
+}
+
+// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUID(uid int) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUid(uid)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
+}
+
+func getentUser(args string) (user.User, error) {
+ reader, err := callGetent(args)
+ if err != nil {
+ return user.User{}, err
+ }
+ users, err := user.ParsePasswd(reader)
+ if err != nil {
+ return user.User{}, err
+ }
+ if len(users) == 0 {
+ return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
+ }
+ return users[0], nil
+}
+
+// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGroup(groupname string) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGroup(groupname)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
+}
+
+// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGID(gid int) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGid(gid)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(fmt.Sprintf("%s %d", "group", gid))
+}
+
+func getentGroup(args string) (user.Group, error) {
+ reader, err := callGetent(args)
+ if err != nil {
+ return user.Group{}, err
+ }
+ groups, err := user.ParseGroup(reader)
+ if err != nil {
+ return user.Group{}, err
+ }
+ if len(groups) == 0 {
+ return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
+ }
+ return groups[0], nil
+}
+
+func callGetent(args string) (io.Reader, error) {
+ entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
+ // if no `getent` command on host, can't do anything else
+ if getentCmd == "" {
+ return nil, fmt.Errorf("unable to find getent command on the host")
+ }
+ out, err := execCmd(getentCmd, args)
+ if err != nil {
+ exitCode, errC := system.GetExitCode(err)
+ if errC != nil {
+ return nil, err
+ }
+ switch exitCode {
+ case 1:
+ return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
+ case 2:
+ terms := strings.Split(args, " ")
+ return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
+ case 3:
+ return nil, fmt.Errorf("getent database doesn't support enumeration")
+ default:
+ return nil, err
+ }
+
+ }
+ return bytes.NewReader(out), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 000000000..45d2878e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package idtools
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// Platforms such as Windows do not support the UID/GID concept, so this is
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+// Windows does not require/support this function, so always return true
+func CanAccess(path string, pair IDPair) bool {
+ return true
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 000000000..9da7975e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,164 @@
+package idtools
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
+// useradd -r -s /bin/false <username>
+
+var (
+ once sync.Once
+ userCommand string
+
+ cmdTemplates = map[string]string{
+ "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+ "useradd": "-r -s /bin/false %s",
+ "usermod": "-%s %d-%d %s",
+ }
+
+ idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+ // default length for a UID/GID subordinate range
+ defaultRangeLen = 65536
+ defaultRangeStart = 100000
+ userMod = "usermod"
+)
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ if err := addUser(name); err != nil {
+ return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ }
+
+ // Query the system for the created uid and gid pair
+ out, err := execCmd("id", name)
+ if err != nil {
+ return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ }
+ matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+ if len(matches) != 3 {
+ return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ }
+ uid, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ }
+ gid, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ }
+
+ // Now we need to create the subuid/subgid ranges for our new user/group (system users
+ // do not get auto-created ranges in subuid/subgid)
+
+ if err := createSubordinateRanges(name); err != nil {
+ return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ }
+ return uid, gid, nil
+}
+
+func addUser(userName string) error {
+ once.Do(func() {
+ // set up which commands are used for adding users/groups dependent on distro
+ if _, err := resolveBinary("adduser"); err == nil {
+ userCommand = "adduser"
+ } else if _, err := resolveBinary("useradd"); err == nil {
+ userCommand = "useradd"
+ }
+ })
+ if userCommand == "" {
+ return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ }
+ args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+ out, err := execCmd(userCommand, args)
+ if err != nil {
+ return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ }
+ return nil
+}
+
+func createSubordinateRanges(name string) error {
+
+ // first, we should verify that ranges weren't automatically created
+ // by the distro tooling
+ ranges, err := parseSubuid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no UID ranges; let's create one
+ startID, err := findNextUIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subuid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+
+ ranges, err = parseSubgid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no GID ranges; let's create one
+ startID, err := findNextGIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subgid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+ return nil
+}
+
+func findNextUIDRange() (int, error) {
+ ranges, err := parseSubuid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+ ranges, err := parseSubgid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+ startID := defaultRangeStart
+ for _, arange := range rangeList {
+ if wouldOverlap(arange, startID) {
+ startID = arange.Start + arange.Length
+ }
+ }
+ return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+ low := ID
+ high := ID + defaultRangeLen
+ if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+ (high <= arange.Start+arange.Length && high >= arange.Start) {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 000000000..d98b354cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools
+
+import "fmt"
+
+// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
+// and calls the appropriate helper function to add the group and then
+// the user to the group in /etc/group and /etc/passwd respectively.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
new file mode 100644
index 000000000..9703ecbd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package idtools
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+func resolveBinary(binname string) (string, error) {
+ binaryPath, err := exec.LookPath(binname)
+ if err != nil {
+ return "", err
+ }
+ resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+ if err != nil {
+ return "", err
+ }
+ // only return no error if the final resolved binary basename
+ // matches what was searched for
+ if filepath.Base(resolvedPath) == binname {
+ return resolvedPath, nil
+ }
+ return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+ execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+ return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
new file mode 100644
index 000000000..3d737b3e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+ buf []byte
+ pos int
+ lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+ n := copy(b.buf[b.pos:cap(b.buf)], p)
+ b.pos += n
+
+ if n < len(p) {
+ if b.pos == cap(b.buf) {
+ return n, errBufferFull
+ }
+ return n, io.ErrShortWrite
+ }
+ return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+ n := copy(p, b.buf[b.lastRead:b.pos])
+ b.lastRead += n
+ return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+ return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+ return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+ b.pos = 0
+ b.lastRead = 0
+ b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+ return string(b.buf[b.lastRead:b.pos])
+}
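
An in-package sketch of fixedBuffer's contract: capacity is fixed at construction, Write reports errBufferFull once the buffer fills, and Read drains whatever was written (demoFixedBuffer is a hypothetical helper, not vendored code):

package ioutils

import "fmt"

// demoFixedBuffer shows the fill-then-drain behavior of fixedBuffer.
func demoFixedBuffer() {
	b := &fixedBuffer{buf: make([]byte, 0, 4)}

	n, err := b.Write([]byte("hello")) // only 4 of 5 bytes fit
	fmt.Println(n, err)                // 4 buffer is full

	out := make([]byte, 4)
	n, _ = b.Read(out)
	fmt.Println(n, string(out)) // 4 hell
}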
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
new file mode 100644
index 000000000..72a04f349
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+ // ErrClosed is returned when Write is called on a closed BytesPipe.
+ ErrClosed = errors.New("write to closed BytesPipe")
+
+ bufPools = make(map[int]*sync.Pool)
+ bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't be overgrown after peak loads.
+type BytesPipe struct {
+ mu sync.Mutex
+ wait *sync.Cond
+ buf []*fixedBuffer
+ bufLen int
+ closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe. The pipe starts with a single
+// internal buffer of capacity minCap (64 bytes) and allocates additional
+// fixed-size buffers as writes outgrow it.
+func NewBytesPipe() *BytesPipe {
+ bp := &BytesPipe{}
+ bp.buf = append(bp.buf, getBuffer(minCap))
+ bp.wait = sync.NewCond(&bp.mu)
+ return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+ bp.mu.Lock()
+
+ written := 0
+loop0:
+ for {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return written, ErrClosed
+ }
+
+ if len(bp.buf) == 0 {
+ bp.buf = append(bp.buf, getBuffer(64))
+ }
+ // get the last buffer
+ b := bp.buf[len(bp.buf)-1]
+
+ n, err := b.Write(p)
+ written += n
+ bp.bufLen += n
+
+ // errBufferFull is an error we expect to get if the buffer is full
+ if err != nil && err != errBufferFull {
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, err
+ }
+
+ // if there was enough room to write all then break
+ if len(p) == n {
+ break
+ }
+
+ // more data: write to the next slice
+ p = p[n:]
+
+ // make sure the buffer doesn't grow too big from this write
+ for bp.bufLen >= blockThreshold {
+ bp.wait.Wait()
+ if bp.closeErr != nil {
+ continue loop0
+ }
+ }
+
+ // add new byte slice to the buffers slice and continue writing
+ nextCap := b.Cap() * 2
+ if nextCap > maxCap {
+ nextCap = maxCap
+ }
+ bp.buf = append(bp.buf, getBuffer(nextCap))
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+ bp.mu.Lock()
+ if err != nil {
+ bp.closeErr = err
+ } else {
+ bp.closeErr = io.EOF
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+ return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data can be read only once.
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+ bp.mu.Lock()
+ if bp.bufLen == 0 {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return 0, bp.closeErr
+ }
+ bp.wait.Wait()
+ if bp.bufLen == 0 && bp.closeErr != nil {
+ err := bp.closeErr
+ bp.mu.Unlock()
+ return 0, err
+ }
+ }
+
+ for bp.bufLen > 0 {
+ b := bp.buf[0]
+ read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+ n += read
+ bp.bufLen -= read
+
+ if b.Len() == 0 {
+ // it's empty so return it to the pool and move to the next one
+ returnBuffer(b)
+ bp.buf[0] = nil
+ bp.buf = bp.buf[1:]
+ }
+
+ if len(p) == read {
+ break
+ }
+
+ p = p[read:]
+ }
+
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return
+}
+
+func returnBuffer(b *fixedBuffer) {
+ b.Reset()
+ bufPoolsLock.Lock()
+ pool := bufPools[b.Cap()]
+ bufPoolsLock.Unlock()
+ if pool != nil {
+ pool.Put(b)
+ }
+}
+
+func getBuffer(size int) *fixedBuffer {
+ bufPoolsLock.Lock()
+ pool, ok := bufPools[size]
+ if !ok {
+ pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+ bufPools[size] = pool
+ }
+ bufPoolsLock.Unlock()
+ return pool.Get().(*fixedBuffer)
+}
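
A usage sketch: one goroutine writes and closes, another drains; Close turns reads on the emptied pipe into io.EOF, so ioutil.ReadAll terminates cleanly:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()
	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("pipe"))
		bp.Close() // reads report io.EOF once the buffers drain
	}()
	data, err := ioutil.ReadAll(bp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello, pipe
}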
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
new file mode 100644
index 000000000..a56c46265
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
@@ -0,0 +1,162 @@
+package ioutils
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
+// temporary file, and closing it atomically renames the temporary file to the
+// destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ if err != nil {
+ return nil, err
+ }
+
+ abspath, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ return &atomicFileWriter{
+ f: f,
+ fn: abspath,
+ perm: perm,
+ }, nil
+}
+
+// AtomicWriteFile atomically writes data to a file named by filename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := NewAtomicFileWriter(filename, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ f.(*atomicFileWriter).writeErr = err
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type atomicFileWriter struct {
+ f *os.File
+ fn string
+ writeErr error
+ perm os.FileMode
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+ n, err := w.f.Write(dt)
+ if err != nil {
+ w.writeErr = err
+ }
+ return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+ defer func() {
+ if retErr != nil || w.writeErr != nil {
+ os.Remove(w.f.Name())
+ }
+ }()
+ if err := w.f.Sync(); err != nil {
+ w.f.Close()
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+ return err
+ }
+ if w.writeErr == nil {
+ return os.Rename(w.f.Name(), w.fn)
+ }
+ return nil
+}
+
+// AtomicWriteSet is used to atomically write a set
+// of files and ensure they are visible at the same time.
+// Must be committed to a new directory.
+type AtomicWriteSet struct {
+ root string
+}
+
+// NewAtomicWriteSet creates a new atomic write set to
+// atomically create a set of files. The given directory
+// is used as the base directory for storing files before
+// commit. If no temporary directory is given the system
+// default is used.
+func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
+ td, err := ioutil.TempDir(tmpDir, "write-set-")
+ if err != nil {
+ return nil, err
+ }
+
+ return &AtomicWriteSet{
+ root: td,
+ }, nil
+}
+
+// WriteFile writes a file to the set, guaranteeing the file
+// has been synced.
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type syncFileCloser struct {
+ *os.File
+}
+
+func (w syncFileCloser) Close() error {
+ err := w.File.Sync()
+ if err1 := w.File.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// FileWriter opens a file writer inside the set. The file
+// should be synced and closed before calling Commit.
+func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return syncFileCloser{f}, nil
+}
+
+// Cancel cancels the set and removes all temporary data
+// created in the set.
+func (ws *AtomicWriteSet) Cancel() error {
+ return os.RemoveAll(ws.root)
+}
+
+// Commit moves all created files to the target directory. The
+// target directory must not exist and the parent of the target
+// directory must exist.
+func (ws *AtomicWriteSet) Commit(target string) error {
+ return os.Rename(ws.root, target)
+}
+
+// String returns the location the set is writing to.
+func (ws *AtomicWriteSet) String() string {
+ return ws.root
+}
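+
+// Example (illustrative only, not part of the vendored source): staging two
+// hypothetical files and publishing them with a single rename. Note that
+// Commit's target must not already exist.
+//
+//	ws, err := NewAtomicWriteSet("")
+//	if err != nil {
+//		return err
+//	}
+//	ws.WriteFile("a.json", aData, 0644)
+//	ws.WriteFile("b.json", bData, 0644)
+//	if err := ws.Commit("/var/lib/example/bundle"); err != nil {
+//		ws.Cancel()
+//	}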
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 000000000..63f3c07f4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns an io.ReadCloser that wraps r and invokes closer on Close.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+// NewReaderErrWrapper returns an io.Reader that invokes closer whenever a read
+// returns an error (including io.EOF).
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
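+
+// Example (illustrative only, not part of the vendored source):
+//
+//	sum, _ := HashData(strings.NewReader("hello"))
+//	// sum == "sha256:2cf24dba..." (digest truncated here for brevity)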
+
+// OnEOFReader wraps an io.ReadCloser and a function; the function runs at
+// most once, when the reader reaches end of file or is closed.
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+// Close closes the reader and runs the function.
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+ cancel func()
+ pR *io.PipeReader // Stream to read from
+ pW *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+ pR, pW := io.Pipe()
+
+ // Create a context used to signal when the pipe is closed
+ doneCtx, cancel := context.WithCancel(context.Background())
+
+ p := &cancelReadCloser{
+ cancel: cancel,
+ pR: pR,
+ pW: pW,
+ }
+
+ go func() {
+ _, err := io.Copy(pW, in)
+ select {
+ case <-ctx.Done():
+ // If the context was closed, p.closeWithError
+ // was already called. Calling it again would
+ // change the error that Read returns.
+ default:
+ p.closeWithError(err)
+ }
+ in.Close()
+ }()
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ p.closeWithError(ctx.Err())
+ case <-doneCtx.Done():
+ return
+ }
+ }
+ }()
+
+ return p
+}
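+
+// Example (illustrative only, not part of the vendored source): bounding a
+// read from a slow source with a deadline; slowBody is a hypothetical
+// io.ReadCloser.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	rc := NewCancelReadCloser(ctx, slowBody)
+//	defer rc.Close()
+//	data, err := ioutil.ReadAll(rc) // err is ctx.Err() if the deadline expires first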
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+ return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+ p.pW.CloseWithError(err)
+ p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+ p.closeWithError(io.EOF)
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 000000000..1539ad21b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+ return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 000000000..c258e5fdd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+ "io/ioutil"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+ tempDir, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 000000000..52a4901ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+ "io"
+ "sync"
+)
+
+// WriteFlusher wraps the Write and Flush operations, ensuring that every
+// write is followed by a flush. In addition, the Close method can be called
+// to intercept Write/Flush calls once the target's lifecycle has ended.
+type WriteFlusher struct {
+ w io.Writer
+ flusher flusher
+ flushed chan struct{}
+ flushedOnce sync.Once
+ closed chan struct{}
+ closeLock sync.Mutex
+}
+
+type flusher interface {
+ Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ select {
+ case <-wf.closed:
+ return 0, errWriteFlusherClosed
+ default:
+ }
+
+ n, err = wf.w.Write(b)
+ wf.Flush() // every write is a flush.
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ select {
+ case <-wf.closed:
+ return
+ default:
+ }
+
+ wf.flushedOnce.Do(func() {
+ close(wf.flushed)
+ })
+ wf.flusher.Flush()
+}
+
+// Flushed reports whether at least one flush has occurred.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued. Another
+	// hook should be used instead.
+ var flushed bool
+ select {
+ case <-wf.flushed:
+ flushed = true
+ default:
+ }
+ return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, calls to Write return an error and
+// Flush becomes a no-op.
+func (wf *WriteFlusher) Close() error {
+ wf.closeLock.Lock()
+ defer wf.closeLock.Unlock()
+
+ select {
+ case <-wf.closed:
+ return errWriteFlusherClosed
+ default:
+ close(wf.closed)
+ }
+ return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var fl flusher
+ if f, ok := w.(flusher); ok {
+ fl = f
+ } else {
+ fl = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
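+
+// Example (illustrative only, not part of the vendored source): streaming
+// progress lines over HTTP. http.ResponseWriter typically implements the
+// flusher interface above, so each line reaches the client immediately.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		wf := NewWriteFlusher(w)
+//		defer wf.Close()
+//		fmt.Fprintln(wf, "step 1 complete")
+//	}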
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 000000000..ccc7f9c23
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns an io.WriteCloser whose Close is a no-op.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a nop operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
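+
+// Example (illustrative only, not part of the vendored source): counting the
+// bytes produced by json.Encoder, whose Encode method does not itself report
+// a byte count.
+//
+//	wc := NewWriteCounter(ioutil.Discard)
+//	json.NewEncoder(wc).Encode(map[string]int{"a": 1})
+//	fmt.Println(wc.Count) // encoded length, including the trailing newline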
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 000000000..9b15bfff4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// Package longpath introduces some constants and helper functions for handling
+// long paths in Windows, which are expected to be prepended with `\\?\` and
+// followed by either a drive letter, a UNC server\share, or a volume identifier.
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
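+
+// Example (illustrative only, not part of the vendored source):
+//
+//	AddPrefix(`C:\temp`)        // `\\?\C:\temp`
+//	AddPrefix(`\\server\share`) // `\\?\UNC\server\share`
+//	AddPrefix(`\\?\C:\temp`)    // unchanged, already prefixed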
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go
new file mode 100644
index 000000000..607dbed43
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags.go
@@ -0,0 +1,149 @@
+package mount
+
+import (
+ "fmt"
+ "strings"
+)
+
+var flags = map[string]struct {
+ clear bool
+ flag int
+}{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "unbindable": {false, UNBINDABLE},
+ "runbindable": {false, RUNBINDABLE},
+ "private": {false, PRIVATE},
+ "rprivate": {false, RPRIVATE},
+ "shared": {false, SHARED},
+ "rshared": {false, RSHARED},
+ "slave": {false, SLAVE},
+ "rslave": {false, RSLAVE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+}
+
+var validFlags = map[string]bool{
+ "": true,
+ "size": true,
+ "mode": true,
+ "uid": true,
+ "gid": true,
+ "nr_inodes": true,
+ "nr_blocks": true,
+ "mpol": true,
+}
+
+var propagationFlags = map[string]bool{
+ "bind": true,
+ "rbind": true,
+ "unbindable": true,
+ "runbindable": true,
+ "private": true,
+ "rprivate": true,
+ "shared": true,
+ "rshared": true,
+ "slave": true,
+ "rslave": true,
+}
+
+// MergeTmpfsOptions merges mount options to make sure there are no duplicates.
+func MergeTmpfsOptions(options []string) ([]string, error) {
+	// We use collision maps to remove duplicates.
+ // For flag, the key is the flag value (the key for propagation flag is -1)
+ // For data=value, the key is the data
+ flagCollisions := map[int]bool{}
+ dataCollisions := map[string]bool{}
+
+ var newOptions []string
+ // We process in reverse order
+ for i := len(options) - 1; i >= 0; i-- {
+ option := options[i]
+ if option == "defaults" {
+ continue
+ }
+ if f, ok := flags[option]; ok && f.flag != 0 {
+ // There is only one propagation mode
+ key := f.flag
+ if propagationFlags[option] {
+ key = -1
+ }
+ // Check to see if there is collision for flag
+ if !flagCollisions[key] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ flagCollisions[key] = true
+ }
+ continue
+ }
+ opt := strings.SplitN(option, "=", 2)
+ if len(opt) != 2 || !validFlags[opt[0]] {
+ return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ if !dataCollisions[opt[0]] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ dataCollisions[opt[0]] = true
+ }
+ }
+
+ return newOptions, nil
+}
+
+// parseOptions parses fstab-style mount options into mount() flags
+// and device-specific data.
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
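+
+// Example (illustrative only, not part of the vendored source): recognized
+// tokens become flag bits, everything else is joined into the data string.
+//
+//	flag, data := parseOptions("ro,noatime,size=64m")
+//	// flag == RDONLY|NOATIME, data == "size=64m"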
+
+// ParseTmpfsOptions parses fstab-style mount options into flags and data.
+func ParseTmpfsOptions(options string) (int, string, error) {
+ flags, data := parseOptions(options)
+ for _, o := range strings.Split(data, ",") {
+ opt := strings.SplitN(o, "=", 2)
+ if !validFlags[opt[0]] {
+ return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ }
+ return flags, data, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
new file mode 100644
index 000000000..5f76f331b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
@@ -0,0 +1,49 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ // RDONLY will mount the filesystem as read-only.
+ RDONLY = C.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = C.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = C.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ mntDetach = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
new file mode 100644
index 000000000..0425d0dd6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
@@ -0,0 +1,87 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = unix.MS_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = unix.MS_NOSUID
+
+ // NODEV will not interpret character or block special devices on the file
+ // system.
+ NODEV = unix.MS_NODEV
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = unix.MS_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = unix.MS_SYNCHRONOUS
+
+ // DIRSYNC will force all directory updates within the file system to be done
+ // synchronously. This affects the following system calls: create, link,
+ // unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = unix.MS_DIRSYNC
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = unix.MS_REMOUNT
+
+ // MANDLOCK will force mandatory locks on a filesystem.
+ MANDLOCK = unix.MS_MANDLOCK
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = unix.MS_NOATIME
+
+ // NODIRATIME will not update the directory access time.
+ NODIRATIME = unix.MS_NODIRATIME
+
+ // BIND remounts a subtree somewhere else.
+ BIND = unix.MS_BIND
+
+ // RBIND remounts a subtree and all possible submounts somewhere else.
+ RBIND = unix.MS_BIND | unix.MS_REC
+
+ // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+ UNBINDABLE = unix.MS_UNBINDABLE
+
+ // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
+ RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
+
+ // PRIVATE creates a mount which carries no propagation abilities.
+ PRIVATE = unix.MS_PRIVATE
+
+ // RPRIVATE marks the entire mount tree as PRIVATE.
+ RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
+
+ // SLAVE creates a mount which receives propagation from its master, but not
+ // vice versa.
+ SLAVE = unix.MS_SLAVE
+
+ // RSLAVE marks the entire mount tree as SLAVE.
+ RSLAVE = unix.MS_SLAVE | unix.MS_REC
+
+ // SHARED creates a mount which provides the ability to create mirrors of
+ // that mount such that mounts and unmounts within any of the mirrors
+ // propagate to the other mirrors.
+ SHARED = unix.MS_SHARED
+
+ // RSHARED marks the entire mount tree as SHARED.
+ RSHARED = unix.MS_SHARED | unix.MS_REC
+
+ // RELATIME updates inode access times relative to modify or change time.
+ RELATIME = unix.MS_RELATIME
+
+ // STRICTATIME allows to explicitly request full atime updates. This makes
+ // it possible for the kernel to default to relatime or noatime but still
+ // allow userspace to override it.
+ STRICTATIME = unix.MS_STRICTATIME
+
+ mntDetach = unix.MNT_DETACH
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
new file mode 100644
index 000000000..9ed741e3f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
@@ -0,0 +1,31 @@
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+ mntDetach = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
new file mode 100644
index 000000000..c9fdfd694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mount.go
@@ -0,0 +1,86 @@
+package mount
+
+import (
+ "sort"
+ "strings"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+ return parseMountTable()
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ // Search the table for the mountpoint
+ for _, e := range entries {
+ if e.Mountpoint == mountpoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Mount will mount filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
+ }
+ return ForceMount(device, target, mType, options)
+}
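+
+// Example (illustrative only, not part of the vendored source; Linux, requires
+// privileges): bind-mounting a directory read-only. The paths are hypothetical.
+//
+//	if err := Mount("/srv/data", "/mnt/data", "none", "bind,ro"); err != nil {
+//		return err
+//	}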
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+ flag, data := parseOptions(options)
+ return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+ if mounted, err := Mounted(target); err != nil || !mounted {
+ return err
+ }
+ return unmount(target, mntDetach)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+ mounts, err := GetMounts()
+ if err != nil {
+ return err
+ }
+
+ // Make the deepest mount be first
+ sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+ for i, m := range mounts {
+ if !strings.HasPrefix(m.Mountpoint, target) {
+ continue
+ }
+ if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+ if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+ return err
+ }
+			// Ignore errors for submounts and continue trying to unmount others.
+			// The final unmount should fail if there are any submounts remaining.
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
new file mode 100644
index 000000000..814896cc9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,60 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
new file mode 100644
index 000000000..39c36d472
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
@@ -0,0 +1,57 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+	// ptypes is the set of propagation types.
+ ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+	// pflags is the full set of valid flags for a change propagation call.
+ pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+
+	// broflags is the combination of bind and read-only.
+ broflags = unix.MS_BIND | unix.MS_RDONLY
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+ switch {
+ // We treat device "" and "none" as a remount request to provide compatibility with
+ // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+ case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
+ return true
+ default:
+ return false
+ }
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+ oflags := flags &^ ptypes
+ if !isremount(device, flags) || data != "" {
+ // Initial call applying all non-propagation flags for mount
+ // or remount with changed data
+ if err := unix.Mount(device, target, mType, oflags, data); err != nil {
+ return err
+ }
+ }
+
+ if flags&ptypes != 0 {
+ // Change the propagation type.
+ if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
+ return err
+ }
+ }
+
+ if oflags&broflags == broflags {
+ // Remount the bind to apply read only.
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "")
+ }
+
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
new file mode 100644
index 000000000..c684aa81f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
@@ -0,0 +1,33 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+ "unsafe"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ spec := C.CString(device)
+ dir := C.CString(target)
+ fstype := C.CString(mType)
+ _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+ C.free(unsafe.Pointer(spec))
+ C.free(unsafe.Pointer(dir))
+ C.free(unsafe.Pointer(fstype))
+ return err
+}
+
+func unmount(target string, flag int) error {
+ err := unix.Unmount(target, flag)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
new file mode 100644
index 000000000..a2a3bb457
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+ panic("Not implemented")
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
new file mode 100644
index 000000000..ff4cc1d86
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
@@ -0,0 +1,54 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
+
+type byMountpoint []*Info
+
+func (by byMountpoint) Len() int {
+ return len(by)
+}
+
+func (by byMountpoint) Less(i, j int) bool {
+ return by[i].Mountpoint < by[j].Mountpoint
+}
+
+func (by byMountpoint) Swap(i, j int) {
+ by[i], by[j] = by[j], by[i]
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 000000000..4f32edcd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// parseMountTable reads the mount table via getmntinfo(3); FreeBSD has no
+// /proc/self/mountinfo equivalent.
+func parseMountTable() ([]*Info, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*Info
+ for _, entry := range entries {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
new file mode 100644
index 000000000..be69fee1d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*Info{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) < 3 {
+			return nil, fmt.Errorf("Found fewer than 3 fields after '-' in %q", text)
+ }
+
+ if optionalFields != "-" {
+ p.Optional = optionalFields
+ }
+
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+ out = append(out, p)
+ }
+ return out, nil
+}
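+
+// Example (illustrative only, not part of the vendored source): the sample
+// line in the format comment above parses to
+//
+//	Info{ID: 36, Parent: 35, Major: 98, Minor: 0, Root: "/mnt1",
+//		Mountpoint: "/mnt2", Opts: "rw,noatime", Optional: "master:1",
+//		Fstype: "ext3", Source: "/dev/root", VfsOpts: "rw,errors=continue"}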
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
new file mode 100644
index 000000000..ad9ab57f8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+ "fmt"
+)
+
+func parseMountTable() ([]*Info, error) {
+ mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+ if mnttab == nil {
+ return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+ }
+
+ var out []*Info
+ var mp C.struct_mnttab
+
+ ret := C.getmntent(mnttab, &mp)
+ for ret == 0 {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+ mountinfo.Source = C.GoString(mp.mnt_special)
+ mountinfo.Fstype = C.GoString(mp.mnt_fstype)
+ mountinfo.Opts = C.GoString(mp.mnt_mntopts)
+ out = append(out, &mountinfo)
+ ret = C.getmntent(mnttab, &mp)
+ }
+
+ C.fclose(mnttab)
+ return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 000000000..7fbcf1921
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func parseMountTable() ([]*Info, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
new file mode 100644
index 000000000..dab8a37ed
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+ // Do NOT return an error!
+ return nil, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 000000000..8ceec84bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ mounted, err := Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ if !mounted {
+ if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+ return err
+ }
+ }
+ if _, err = Mounted(mountPoint); err != nil {
+ return err
+ }
+
+ return ForceMount("", mountPoint, "none", options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
new file mode 100644
index 000000000..09f6b03cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
@@ -0,0 +1,58 @@
+// +build solaris
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ // TODO: Solaris does not support bind mounts.
+ // Evaluate lofs and also look at the relevant
+ // mount flags to be supported.
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 000000000..6a111a3ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,137 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+const buffer32K = 32 * 1024
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+ buffer32KPool = newBufferPoolWithSize(buffer32K)
+)
+
+// BufioReaderPool is a pool of bufio.Reader instances backed by sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ return &BufioReaderPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
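+
+// Example (illustrative only, not part of the vendored source): reusing a
+// pooled 32K reader instead of allocating one per connection; conn is a
+// hypothetical net.Conn.
+//
+//	br := BufioReader32KPool.Get(conn)
+//	line, err := br.ReadString('\n')
+//	BufioReader32KPool.Put(br)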
+
+type bufferPool struct {
+ pool sync.Pool
+}
+
+func newBufferPoolWithSize(size int) *bufferPool {
+ return &bufferPool{
+ pool: sync.Pool{
+ New: func() interface{} { return make([]byte, size) },
+ },
+ }
+}
+
+func (bp *bufferPool) Get() []byte {
+ return bp.pool.Get().([]byte)
+}
+
+func (bp *bufferPool) Put(b []byte) {
+ bp.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a pooled buffer to avoid allocations in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := buffer32KPool.Get()
+ written, err = io.CopyBuffer(dst, src, buf)
+ buffer32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a pool of bufio.Writer instances backed by sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ return &BufioWriterPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md
new file mode 100644
index 000000000..2b237a594
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go
new file mode 100644
index 000000000..68bb77cf5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal.go
@@ -0,0 +1,54 @@
+// Package signal provides helper functions for dealing with signals across
+// various operating systems.
+package signal
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+ handledSigs := []os.Signal{}
+ for _, s := range SignalMap {
+ handledSigs = append(handledSigs, s)
+ }
+ signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+ signal.Stop(sigc)
+ close(sigc)
+}
+
+// ParseSignal translates a string to a valid syscall signal.
+// It returns an error if the signal map doesn't include the given signal.
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+ s, err := strconv.Atoi(rawSignal)
+ if err == nil {
+ if s == 0 {
+ return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return syscall.Signal(s), nil
+ }
+ signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+ if !ok {
+ return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return signal, nil
+}
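+
+// Example (illustrative only, not part of the vendored source):
+//
+//	sig, _ := ParseSignal("SIGTERM") // syscall.SIGTERM
+//	sig, _ = ParseSignal("term")     // also syscall.SIGTERM; lookup is case-insensitive
+//	sig, _ = ParseSignal("15")       // numeric values are used as-is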
+
+// ValidSignalForPlatform returns true if a signal is valid on the platform
+func ValidSignalForPlatform(sig syscall.Signal) bool {
+ for _, v := range SignalMap {
+ if v == sig {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
new file mode 100644
index 000000000..946de87e9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
@@ -0,0 +1,41 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of Darwin signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
new file mode 100644
index 000000000..6b9569bb7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "LWP": syscall.SIGLWP,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "THR": syscall.SIGTHR,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
new file mode 100644
index 000000000..3594796ca
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
@@ -0,0 +1,82 @@
+package signal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ sigrtmin = 34
+ sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": unix.SIGABRT,
+ "ALRM": unix.SIGALRM,
+ "BUS": unix.SIGBUS,
+ "CHLD": unix.SIGCHLD,
+ "CLD": unix.SIGCLD,
+ "CONT": unix.SIGCONT,
+ "FPE": unix.SIGFPE,
+ "HUP": unix.SIGHUP,
+ "ILL": unix.SIGILL,
+ "INT": unix.SIGINT,
+ "IO": unix.SIGIO,
+ "IOT": unix.SIGIOT,
+ "KILL": unix.SIGKILL,
+ "PIPE": unix.SIGPIPE,
+ "POLL": unix.SIGPOLL,
+ "PROF": unix.SIGPROF,
+ "PWR": unix.SIGPWR,
+ "QUIT": unix.SIGQUIT,
+ "SEGV": unix.SIGSEGV,
+ "STKFLT": unix.SIGSTKFLT,
+ "STOP": unix.SIGSTOP,
+ "SYS": unix.SIGSYS,
+ "TERM": unix.SIGTERM,
+ "TRAP": unix.SIGTRAP,
+ "TSTP": unix.SIGTSTP,
+ "TTIN": unix.SIGTTIN,
+ "TTOU": unix.SIGTTOU,
+ "UNUSED": unix.SIGUNUSED,
+ "URG": unix.SIGURG,
+ "USR1": unix.SIGUSR1,
+ "USR2": unix.SIGUSR2,
+ "VTALRM": unix.SIGVTALRM,
+ "WINCH": unix.SIGWINCH,
+ "XCPU": unix.SIGXCPU,
+ "XFSZ": unix.SIGXFSZ,
+ "RTMIN": sigrtmin,
+ "RTMIN+1": sigrtmin + 1,
+ "RTMIN+2": sigrtmin + 2,
+ "RTMIN+3": sigrtmin + 3,
+ "RTMIN+4": sigrtmin + 4,
+ "RTMIN+5": sigrtmin + 5,
+ "RTMIN+6": sigrtmin + 6,
+ "RTMIN+7": sigrtmin + 7,
+ "RTMIN+8": sigrtmin + 8,
+ "RTMIN+9": sigrtmin + 9,
+ "RTMIN+10": sigrtmin + 10,
+ "RTMIN+11": sigrtmin + 11,
+ "RTMIN+12": sigrtmin + 12,
+ "RTMIN+13": sigrtmin + 13,
+ "RTMIN+14": sigrtmin + 14,
+ "RTMIN+15": sigrtmin + 15,
+ "RTMAX-14": sigrtmax - 14,
+ "RTMAX-13": sigrtmax - 13,
+ "RTMAX-12": sigrtmax - 12,
+ "RTMAX-11": sigrtmax - 11,
+ "RTMAX-10": sigrtmax - 10,
+ "RTMAX-9": sigrtmax - 9,
+ "RTMAX-8": sigrtmax - 8,
+ "RTMAX-7": sigrtmax - 7,
+ "RTMAX-6": sigrtmax - 6,
+ "RTMAX-5": sigrtmax - 5,
+ "RTMAX-4": sigrtmax - 4,
+ "RTMAX-3": sigrtmax - 3,
+ "RTMAX-2": sigrtmax - 2,
+ "RTMAX-1": sigrtmax - 1,
+ "RTMAX": sigrtmax,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go
new file mode 100644
index 000000000..89576b9e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go
@@ -0,0 +1,42 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of Solaris signals.
+// SIGINFO and SIGTHR are not defined for Solaris.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "LWP": syscall.SIGLWP,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
new file mode 100644
index 000000000..5d058fd56
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package signal
+
+import (
+ "syscall"
+)
+
+// Signals used in cli/command. On Unix these map to real syscall signals; the
+// Windows counterpart file substitutes invented values since Windows has no equivalent.
+
+const (
+ // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted.
+ SIGCHLD = syscall.SIGCHLD
+ // SIGWINCH is a signal sent to a process when its controlling terminal changes its size
+ SIGWINCH = syscall.SIGWINCH
+ // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading
+ SIGPIPE = syscall.SIGPIPE
+ // DefaultStopSignal is the syscall signal used to stop a container in unix systems.
+ DefaultStopSignal = "SIGTERM"
+)
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
new file mode 100644
index 000000000..c592d37df
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!darwin,!freebsd,!windows,!solaris
+
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is an empty map of signals for unsupported platform.
+var SignalMap = map[string]syscall.Signal{}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go
new file mode 100644
index 000000000..440f2700e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package signal
+
+import (
+ "syscall"
+)
+
+// Signals used in cli/command (no windows equivalent, use
+// invalid signals so they don't get handled)
+const (
+ SIGCHLD = syscall.Signal(0xff)
+ SIGWINCH = syscall.Signal(0xff)
+ SIGPIPE = syscall.Signal(0xff)
+ // DefaultStopSignal is the syscall signal used to stop a container in windows systems.
+ DefaultStopSignal = "15"
+)
+
+// SignalMap is a map of "supported" signals. As per the comment in Go's
+// ztypes_windows.go: "More invented values for signals". Windows doesn't
+// support signals in any way, shape, or form that Unix does.
+//
+// We have these so that docker kill can be used to gracefully (TERM) and
+// forcibly (KILL) terminate a container on Windows.
+var SignalMap = map[string]syscall.Signal{
+ "KILL": syscall.SIGKILL,
+ "TERM": syscall.SIGTERM,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go
new file mode 100644
index 000000000..2884dfee3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/trap.go
@@ -0,0 +1,104 @@
+package signal
+
+import (
+ "fmt"
+ "os"
+ gosignal "os/signal"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// Trap sets up a simplified signal "trap", appropriate for common
+// behavior expected from a vanilla unix command-line tool in general
+// (and the Docker engine in particular).
+//
+// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
+// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
+// skipped and the process is terminated immediately (allows force quit of stuck daemon)
+// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
+// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while
+// the docker daemon is not restarted and also running under systemd.
+// Fixes https://github.com/docker/docker/issues/19728
+//
+func Trap(cleanup func(), logger interface {
+ Info(args ...interface{})
+}) {
+ c := make(chan os.Signal, 1)
+ // we will handle INT, TERM, QUIT, SIGPIPE here
+ signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE}
+ gosignal.Notify(c, signals...)
+ go func() {
+ interruptCount := uint32(0)
+ for sig := range c {
+ if sig == syscall.SIGPIPE {
+ continue
+ }
+
+ go func(sig os.Signal) {
+ logger.Info(fmt.Sprintf("Processing signal '%v'", sig))
+ switch sig {
+ case os.Interrupt, syscall.SIGTERM:
+ if atomic.LoadUint32(&interruptCount) < 3 {
+ // Initiate the cleanup only once
+ if atomic.AddUint32(&interruptCount, 1) == 1 {
+ // Call the provided cleanup handler
+ cleanup()
+ os.Exit(0)
+ } else {
+ return
+ }
+ } else {
+ // 3 SIGTERM/INT signals received; force exit without cleanup
+ logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+ }
+ case syscall.SIGQUIT:
+ DumpStacks("")
+ logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
+ }
+ // For the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + the signal number.
+ os.Exit(128 + int(sig.(syscall.Signal)))
+ }(sig)
+ }
+ }()
+}
+
+const stacksLogNameTemplate = "goroutine-stacks-%s.log"
+
+// DumpStacks appends the runtime stack to a file in dir and returns the full
+// path to that file.
+func DumpStacks(dir string) (string, error) {
+ var (
+ buf []byte
+ stackSize int
+ )
+ bufferLen := 16384
+ for stackSize == len(buf) {
+ buf = make([]byte, bufferLen)
+ stackSize = runtime.Stack(buf, true)
+ bufferLen *= 2
+ }
+ buf = buf[:stackSize]
+ var f *os.File
+ if dir != "" {
+ path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1)))
+ var err error
+ f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to open file to write the goroutine stacks")
+ }
+ defer f.Close()
+ defer f.Sync()
+ } else {
+ f = os.Stderr
+ }
+ if _, err := f.Write(buf); err != nil {
+ return "", errors.Wrap(err, "failed to write goroutine stacks")
+ }
+ return f.Name(), nil
+}
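
A minimal sketch of wiring Trap into a daemon's main, assuming the vendored import path; the logger argument only needs an Info method, so a trivial adapter suffices:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/signal"
)

// stdLogger satisfies the anonymous interface Trap expects.
type stdLogger struct{}

func (stdLogger) Info(args ...interface{}) { fmt.Println(args...) }

func main() {
	signal.Trap(func() {
		// Runs once on the first SIGINT/SIGTERM; the process exits when it returns.
		fmt.Println("cleaning up")
	}, stdLogger{})

	time.Sleep(time.Hour) // block; Ctrl-C now triggers the cleanup path
}
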
diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md
new file mode 100644
index 000000000..37a5098fd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers.
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
new file mode 100644
index 000000000..a0c7c42a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
@@ -0,0 +1,99 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid
+
+import (
+ cryptorand "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const shortLen = 12
+
+var (
+ validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
+ validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
+)
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+ return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+ if i := strings.IndexRune(id, ':'); i >= 0 {
+ id = id[i+1:]
+ }
+ if len(id) > shortLen {
+ id = id[:shortLen]
+ }
+ return id
+}
+
+func generateID(r io.Reader) string {
+ b := make([]byte, 32)
+ for {
+ if _, err := io.ReadFull(r, b); err != nil {
+ panic(err) // This shouldn't happen
+ }
+ id := hex.EncodeToString(b)
+ // if we try to parse the truncated form as an int and we don't have
+ // an error then the value is all numeric and causes issues when
+ // used as a hostname. ref #3869
+ if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+ continue
+ }
+ return id
+ }
+}
+
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
+ return generateID(cryptorand.Reader)
+}
+
+// GenerateNonCryptoID generates a unique ID without using a cryptographically
+// secure source of randomness.
+// It helps to conserve entropy.
+func GenerateNonCryptoID() string {
+ return generateID(readerFunc(rand.Read))
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return fmt.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+func init() {
+ // safely set the seed globally so we generate random ids. Tries to use a
+ // crypto seed before falling back to time.
+ var seed int64
+ if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+ // This should not happen, but worst-case fallback to time-based seed.
+ seed = time.Now().UnixNano()
+ } else {
+ seed = cryptoseed.Int64()
+ }
+
+ rand.Seed(seed)
+}
+
+type readerFunc func(p []byte) (int, error)
+
+func (fn readerFunc) Read(p []byte) (int, error) {
+ return fn(p)
+}
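
A short usage sketch, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	id := stringid.GenerateRandomID() // 64 hex characters, never all-numeric when truncated
	short := stringid.TruncateID(id)  // first 12 characters

	fmt.Println(stringid.IsShortID(short)) // true
	fmt.Println(stringid.ValidateID(id))   // <nil>
}
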
diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md
new file mode 100644
index 000000000..b3e454573
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings.
diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go
new file mode 100644
index 000000000..8c4c39875
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go
@@ -0,0 +1,99 @@
+// Package stringutils provides helper functions for dealing with strings.
+package stringutils
+
+import (
+ "bytes"
+ "math/rand"
+ "strings"
+)
+
+// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
+func GenerateRandomAlphaOnlyString(n int) string {
+ // the alphabet of characters to sample from
+ letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letters[rand.Intn(len(letters))]
+ }
+ return string(b)
+}
+
+// GenerateRandomASCIIString generates an ASCII random string with length n.
+func GenerateRandomASCIIString(n int) string {
+ chars := "abcdefghijklmnopqrstuvwxyz" +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+ "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+ res := make([]byte, n)
+ for i := 0; i < n; i++ {
+ res[i] = chars[rand.Intn(len(chars))]
+ }
+ return string(res)
+}
+
+// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...).
+// For maxlen of 3 and lower, no ellipsis is appended.
+func Ellipsis(s string, maxlen int) string {
+ r := []rune(s)
+ if len(r) <= maxlen {
+ return s
+ }
+ if maxlen <= 3 {
+ return string(r[:maxlen])
+ }
+ return string(r[:maxlen-3]) + "..."
+}
+
+// Truncate truncates a string to maxlen.
+func Truncate(s string, maxlen int) string {
+ r := []rune(s)
+ if len(r) <= maxlen {
+ return s
+ }
+ return string(r[:maxlen])
+}
+
+// InSlice tests whether a string is contained in a slice of strings.
+// The comparison is case insensitive.
+func InSlice(slice []string, s string) bool {
+ for _, ss := range slice {
+ if strings.ToLower(s) == strings.ToLower(ss) {
+ return true
+ }
+ }
+ return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+ // Bail out early for "simple" strings
+ if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+ buf.WriteString(word)
+ return
+ }
+
+ buf.WriteString("'")
+
+ for i := 0; i < len(word); i++ {
+ b := word[i]
+ if b == '\'' {
+ // Replace literal ' with a close ', a \', and an open '
+ buf.WriteString("'\\''")
+ } else {
+ buf.WriteByte(b)
+ }
+ }
+
+ buf.WriteString("'")
+}
+
+// ShellQuoteArguments takes a list of strings and escapes them so they will be
+// handled correctly when passed as arguments to a program via a shell.
+func ShellQuoteArguments(args []string) string {
+ var buf bytes.Buffer
+ for i, arg := range args {
+ if i != 0 {
+ buf.WriteByte(' ')
+ }
+ quote(arg, &buf)
+ }
+ return buf.String()
+}
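
A quick sketch of the quoting behavior, assuming the vendored import path; note how an embedded single quote becomes the close-escape-reopen sequence '\'':

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringutils"
)

func main() {
	args := []string{"echo", "it's", "a b", "$HOME"}
	fmt.Println(stringutils.ShellQuoteArguments(args))
	// echo 'it'\''s' 'a b' '$HOME'
}
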
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
new file mode 100644
index 000000000..b9fbf3c98
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2017 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD
new file mode 100644
index 000000000..4c056c5ed
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md
new file mode 100644
index 000000000..8dba54fd0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/README.md
@@ -0,0 +1,6 @@
+Package symlink implements EvalSymlinksInScope, which is an extension of filepath.EvalSymlinks,
+as well as a Windows long-path aware version of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go
new file mode 100644
index 000000000..52fb9a691
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
+// absolute path. This function handles paths in a platform-agnostic manner.
+func FollowSymlinkInScope(path, root string) (string, error) {
+ path, err := filepath.Abs(filepath.FromSlash(path))
+ if err != nil {
+ return "", err
+ }
+ root, err = filepath.Abs(filepath.FromSlash(root))
+ if err != nil {
+ return "", err
+ }
+ return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could potentially make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+ root = filepath.Clean(root)
+ if path == root {
+ return path, nil
+ }
+ if !strings.HasPrefix(path, root) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ const maxIter = 255
+ originalPath := path
+ // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+ path = path[len(root):]
+ if root == string(filepath.Separator) {
+ path = string(filepath.Separator) + path
+ }
+ if !strings.HasPrefix(path, string(filepath.Separator)) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ path = filepath.Clean(path)
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ // b here will always be considered to be the "current absolute path inside
+ // root" when we append paths to it, we also append a slash and use
+ // filepath.Clean after the loop to trim the trailing slash
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+ }
+
+ // find next path component, p
+ i := strings.IndexRune(path, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ continue
+ }
+
+ // this takes a b.String() like "b/../" and a p like "c" and turns it
+ // into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+ // root gets prepended and we Clean again (to remove any trailing slash
+ // if the first Clean gave us just "/")
+ cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+ if isDriveOrRoot(cleanP) {
+ // never Lstat "/" itself, or drive letters on Windows
+ b.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ fi, err := os.Lstat(fullP)
+ if os.IsNotExist(err) {
+ // if p does not exist, accept it
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(fullP)
+ if err != nil {
+ return "", err
+ }
+ if system.IsAbs(dest) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+
+ // see note above on "fullP := ..." for why this is double-cleaned and
+ // what's happening here
+ return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
+}
+
+// EvalSymlinks returns the path name after the evaluation of any symbolic
+// links.
+// If path is relative the result will be relative to the current directory,
+// unless one of the components is an absolute symbolic link.
+// This version has been updated to support long paths prepended with `\\?\`.
+func EvalSymlinks(path string) (string, error) {
+ return evalSymlinks(path)
+}
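
A usage sketch of the re-rooting behavior described in the evalSymlinksInScope comment, assuming the vendored import path; /outside is an arbitrary absolute target:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	root, err := ioutil.TempDir("", "scope")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// bar -> /outside: the absolute target is re-rooted under the scope.
	if err := os.Symlink("/outside", filepath.Join(root, "bar")); err != nil {
		panic(err)
	}

	p, err := symlink.FollowSymlinkInScope(filepath.Join(root, "bar"), root)
	fmt.Println(p, err) // <root>/outside <nil>
}
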
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
new file mode 100644
index 000000000..22708273d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package symlink
+
+import (
+ "path/filepath"
+)
+
+func evalSymlinks(path string) (string, error) {
+ return filepath.EvalSymlinks(path)
+}
+
+func isDriveOrRoot(p string) bool {
+ return p == string(filepath.Separator)
+}
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
new file mode 100644
index 000000000..31523ade9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
@@ -0,0 +1,169 @@
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/longpath"
+ "golang.org/x/sys/windows"
+)
+
+func toShort(path string) (string, error) {
+ p, err := windows.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetShortPathName says we can reuse buffer
+ n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil {
+ return "", err
+ }
+ }
+ return windows.UTF16ToString(b), nil
+}
+
+func toLong(path string) (string, error) {
+ p, err := windows.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetLongPathName says we can reuse buffer
+ n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ }
+ b = b[:n]
+ return windows.UTF16ToString(b), nil
+}
+
+func evalSymlinks(path string) (string, error) {
+ path, err := walkSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+
+ p, err := toShort(path)
+ if err != nil {
+ return "", err
+ }
+ p, err = toLong(p)
+ if err != nil {
+ return "", err
+ }
+ // windows.GetLongPathName does not change the case of the drive letter,
+ // but the result of EvalSymlinks must be unique, so we have
+ // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`).
+ // Make drive letter upper case.
+ if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' {
+ p = string(p[0]+'A'-'a') + p[1:]
+ } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' {
+ p = p[:3] + string(p[4]+'A'-'a') + p[5:]
+ }
+ return filepath.Clean(p), nil
+}
+
+const utf8RuneSelf = 0x80
+
+func walkSymlinks(path string) (string, error) {
+ const maxIter = 255
+ originalPath := path
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("EvalSymlinks: too many links in " + originalPath)
+ }
+
+ // A path beginning with `\\?\` represents the root, so automatically
+ // skip that part and begin processing the next segment.
+ if strings.HasPrefix(path, longpath.Prefix) {
+ b.WriteString(longpath.Prefix)
+ path = path[4:]
+ continue
+ }
+
+ // find next path component, p
+ var i = -1
+ for j, c := range path {
+ if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) {
+ i = j
+ break
+ }
+ }
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ if b.Len() == 0 {
+ // must be absolute path
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // If this is the first segment after the long path prefix, accept the
+ // current segment as a volume root or UNC share and move on to the next.
+ if b.String() == longpath.Prefix {
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+
+ fi, err := os.Lstat(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p)
+ if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') {
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+ return filepath.Clean(b.String()), nil
+}
+
+func isDriveOrRoot(p string) bool {
+ if p == string(filepath.Separator) {
+ return true
+ }
+
+ length := len(p)
+ if length >= 2 {
+ if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 000000000..056d19954
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+ unixMaxTime := maxTime
+
+ // If the modified time is prior to the Unix Epoch, or after the
+ // end of Unix Time, os.Chtimes has undefined behavior
+ // default to Unix Epoch in this case, just in case
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ // Take platform specific action for setting create time.
+ if err := setCTime(name, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
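
A small sketch of the clamping behavior, assuming the vendored import path; a pre-epoch timestamp is silently replaced by the Unix epoch:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	f, err := ioutil.TempFile("", "chtimes")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	preEpoch := time.Date(1776, 7, 4, 0, 0, 0, 0, time.UTC)
	if err := system.Chtimes(f.Name(), preEpoch, preEpoch); err != nil {
		panic(err)
	}

	fi, _ := os.Stat(f.Name())
	fmt.Println(fi.ModTime().UTC()) // 1970-01-01 00:00:00 +0000 UTC
}
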
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
new file mode 100644
index 000000000..09d58bcbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+ "time"
+)
+
+// setCTime will set the create time on a file. On Unix, the create
+// time is updated as a side effect of setting the modified time, so
+// no action is required.
+func setCTime(path string, ctime time.Time) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
new file mode 100644
index 000000000..45428c141
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package system
+
+import (
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// setCTime will set the create time on a file. On Windows, this requires
+// calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+ ctimespec := windows.NsecToTimespec(ctime.UnixNano())
+ pathp, e := windows.UTF16PtrFromString(path)
+ if e != nil {
+ return e
+ }
+ h, e := windows.CreateFile(pathp,
+ windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+ windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if e != nil {
+ return e
+ }
+ defer windows.Close(h)
+ c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
+ return windows.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 000000000..288318985
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNotSupportedPlatform means the platform is not supported.
+ // ErrNotSupportedPlatform means the platform is not supported.
+ ErrNotSupportedPlatform = errors.New("platform and architecture are not supported")
+)
diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 000000000..192e36788
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,85 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ procCreateEvent = modkernel32.NewProc("CreateEventW")
+ procOpenEvent = modkernel32.NewProc("OpenEventW")
+ procSetEvent = modkernel32.NewProc("SetEvent")
+ procResetEvent = modkernel32.NewProc("ResetEvent")
+ procPulseEvent = modkernel32.NewProc("PulseEvent")
+)
+
+// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
+func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) {
+ namep, _ := windows.UTF16PtrFromString(name)
+ var _p1 uint32
+ if manualReset {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if initialState {
+ _p2 = 1
+ }
+ r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) {
+ namep, _ := windows.UTF16PtrFromString(name)
+ var _p1 uint32
+ if inheritHandle {
+ _p1 = 1
+ }
+ r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// SetEvent implements win32 SetEvent func in golang.
+func SetEvent(handle windows.Handle) (err error) {
+ return setResetPulse(handle, procSetEvent)
+}
+
+// ResetEvent implements win32 ResetEvent func in golang.
+func ResetEvent(handle windows.Handle) (err error) {
+ return setResetPulse(handle, procResetEvent)
+}
+
+// PulseEvent implements win32 PulseEvent func in golang.
+func PulseEvent(handle windows.Handle) (err error) {
+ return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) {
+ r0, _, _ := proc.Call(uintptr(handle))
+ if r0 != 0 {
+ err = syscall.Errno(r0)
+ }
+ return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive, preventing the GC from freeing it while it is still needed.
+func use(p unsafe.Pointer) {
+ temp = p
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go
new file mode 100644
index 000000000..60f0514b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "fmt"
+ "os/exec"
+ "syscall"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError, returns 0 and an error otherwise.
+func GetExitCode(err error) (int, error) {
+ exitCode := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return procExit.ExitStatus(), nil
+ }
+ }
+ return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode processes the specified error and returns the exit status code
+// if the error was of type exec.ExitError, or 127 if the code cannot be determined.
+func ProcessExitCode(err error) (exitCode int) {
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
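
A usage sketch on a unix shell, assuming the vendored import path:

package main

import (
	"fmt"
	"os/exec"

	"github.com/docker/docker/pkg/system"
)

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	fmt.Println(system.ProcessExitCode(err)) // 3

	err = exec.Command("/no/such/binary").Run()
	fmt.Println(system.ProcessExitCode(err)) // 127: the exit code could not be determined
}
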
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 000000000..102565f76
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,67 @@
+// +build !windows
+
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return MkdirAll(path, perm, sddl)
+}
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permission specified by attribute perm for all dir created.
+func MkdirAll(path string, perm os.FileMode, sddl string) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
+
+// The functions below here are wrappers for the equivalents in the os and ioutils packages.
+// They are passthrough on Unix platforms, and only relevant on Windows.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return os.Create(name)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return os.Open(name)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
+ return os.OpenFile(name, flag, perm)
+}
+
+// TempFileSequential creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ return ioutil.TempFile(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 000000000..a61b53d0b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,298 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ winio "github.com/Microsoft/go-winio"
+ "golang.org/x/sys/windows"
+)
+
+const (
+ // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+ SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+ // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
+ SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// with an appropriate SDDL defined ACL.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return mkdirall(path, true, sddl)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+ return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is largely copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = mkdirall(path[0:j-1], false, sddl)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+ if applyACL {
+ err = mkdirWithACL(path, sddl)
+ } else {
+ err = os.Mkdir(path, 0)
+ }
+
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+ sa := windows.SecurityAttributes{Length: 0}
+ sd, err := winio.SddlToSecurityDescriptor(sddl)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+ namep, err := windows.UTF16PtrFromString(name)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+
+ e := windows.CreateDirectory(namep, &sa)
+ if e != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: e}
+ }
+ return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
+
+// The origin of the functions below here are the golang OS and windows packages,
+// slightly modified to only cope with files, not directories due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
+ if name == "" {
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
+ }
+ r, errf := windowsOpenFileSequential(name, flag, 0)
+ if errf == nil {
+ return r, nil
+ }
+ return nil, &os.PathError{Op: "open", Path: name, Err: errf}
+}
+
+func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
+ r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
+
+func makeInheritSa() *windows.SecurityAttributes {
+ var sa windows.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
+ if len(path) == 0 {
+ return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := windows.UTF16PtrFromString(path)
+ if err != nil {
+ return windows.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
+ case windows.O_RDONLY:
+ access = windows.GENERIC_READ
+ case windows.O_WRONLY:
+ access = windows.GENERIC_WRITE
+ case windows.O_RDWR:
+ access = windows.GENERIC_READ | windows.GENERIC_WRITE
+ }
+ if mode&windows.O_CREAT != 0 {
+ access |= windows.GENERIC_WRITE
+ }
+ if mode&windows.O_APPEND != 0 {
+ access &^= windows.GENERIC_WRITE
+ access |= windows.FILE_APPEND_DATA
+ }
+ sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
+ var sa *windows.SecurityAttributes
+ if mode&windows.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
+ createmode = windows.CREATE_NEW
+ case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
+ createmode = windows.CREATE_ALWAYS
+ case mode&windows.O_CREAT == windows.O_CREAT:
+ createmode = windows.OPEN_ALWAYS
+ case mode&windows.O_TRUNC == windows.O_TRUNC:
+ createmode = windows.TRUNCATE_EXISTING
+ default:
+ createmode = windows.OPEN_EXISTING
+ }
+ // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+ const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+ h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
+ return h, e
+}
+
+// Helpers for TempFileSequential
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+func nextSuffix() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
+// file access. Below is the original comment from golang:
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextSuffix())
+ f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go
new file mode 100644
index 000000000..17935088d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// Used by chtimes
+var maxTime time.Time
+
+func init() {
+ // chtimes initialization
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
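
For reference, the 64-bit clamp resolves to April 2262; a standalone check using only the standard library:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The largest time representable as int64 nanoseconds since the epoch,
	// i.e. the maxTime used by Chtimes on platforms with a 64-bit Timespec.Nsec.
	fmt.Println(time.Unix(0, 1<<63-1).UTC()) // 2262-04-11 23:47:16.854775807 +0000 UTC
}
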
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
new file mode 100644
index 000000000..019c66441
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -0,0 +1,17 @@
+package system
+
+import "os"
+
+// LCOWSupported determines if Linux Containers on Windows are supported.
+// Note: This feature is in development (06/17) and enabled through an
+// environment variable. At a future time, it will be enabled based
+// on build number. @jhowardmsft
+var lcowSupported = false
+
+func init() {
+ // LCOW initialization
+ if os.Getenv("LCOW_SUPPORTED") != "" {
+ lcowSupported = true
+ }
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
new file mode 100644
index 000000000..cff33bb40
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
new file mode 100644
index 000000000..e54d01e69
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
@@ -0,0 +1,6 @@
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return lcowSupported
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
new file mode 100644
index 000000000..bd23c4d50
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Lstat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 000000000..e51df0daf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,14 @@
+package system
+
+import "os"
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+func Lstat(path string) (*StatT, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return fromStatT(&fi)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 000000000..3b6e947e6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
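
A minimal usage sketch (not part of the diff), assuming the vendored package is imported as system and fmt/log come from the standard library:

    info, err := system.ReadMemInfo()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("mem: %d/%d bytes free, swap: %d/%d bytes free\n",
        info.MemFree, info.MemTotal, info.SwapFree, info.SwapTotal)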
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 000000000..385f1d5e7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Returns an error if there are problems reading from the file.
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
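
An illustrative in-package snippet (assumes strings is imported) showing how parseMemInfo handles both well-formed and malformed lines:

    r := strings.NewReader("MemTotal: 2048 kB\nMemFree: 1024 kB\nBogus line\n")
    mi, err := parseMemInfo(r)
    // err == nil; mi.MemTotal == 2048*1024 and mi.MemFree == 1024*1024.
    // The "Bogus line" entry fails the length/unit sanity checks and is skipped.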
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
new file mode 100644
index 000000000..925776e78
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem returns the total system memory using sysconf, the same way prtconf does.
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+ ppKernel := C.getPpKernel()
+ MemTotal := getTotalMem()
+ MemFree := getFreeMem()
+ SwapTotal, SwapFree, err := getSysSwap()
+
+ if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+ SwapFree < 0 {
+		return nil, fmt.Errorf("error getting system memory info: %v", err)
+ }
+
+ meminfo := &MemInfo{}
+	// Total memory is total physical memory less the memory locked by the kernel
+ meminfo.MemTotal = MemTotal - int64(ppKernel)
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+ var tSwap int64
+ var fSwap int64
+ var diskblksPerPage int64
+ num, err := C.swapctl(C.SC_GETNSWP, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ st := C.allocSwaptable(num)
+ _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+ if err != nil {
+ C.freeSwaptable(st)
+ return -1, -1, err
+ }
+
+ diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+ for i := 0; i < int(num); i++ {
+ swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+ tSwap += int64(swapent.ste_pages) * diskblksPerPage
+ fSwap += int64(swapent.ste_free) * diskblksPerPage
+ }
+ C.freeSwaptable(st)
+ return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 000000000..3ce019dff
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux and windows.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 000000000..883944a4c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,45 @@
+package system
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	// dwLength must be set to the size of the structure (64 bytes)
+	// before calling GlobalMemoryStatusEx.
+	msi := &memorystatusex{
+		dwLength: 64,
+	}
+	r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+	if r1 == 0 {
+		// The call failed; return zeroed statistics rather than an error.
+		return &MemInfo{}, nil
+	}
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 000000000..af79a6538
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
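
A worked example (hypothetical, not part of the diff): /dev/null is char device major 1, minor 3 on Linux, and creating it needs CAP_MKNOD. Assumes golang.org/x/sys/unix for the S_IFCHR constant:

    dev := system.Mkdev(1, 3)
    // ((3 & 0xfff00) << 12) | ((1 & 0xfff) << 8) | (3 & 0xff) == 0x103
    err := system.Mknod("/tmp/mynull", unix.S_IFCHR|0600, int(dev))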
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 000000000..2e863c021
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
new file mode 100644
index 000000000..f634a6be6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -0,0 +1,21 @@
+package system
+
+import "runtime"
+
+const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// DefaultPathEnv returns a Unix-style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// (':') character.
+func DefaultPathEnv(platform string) string {
+ if runtime.GOOS == "windows" {
+ if platform != runtime.GOOS && LCOWSupported() {
+ return defaultUnixPathEnv
+ }
+ // Deliberately empty on Windows containers on Windows as the default path will be set by
+ // the container. Docker has no context of what the default path should be.
+ return ""
+ }
+	return defaultUnixPathEnv
+}
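
How the three cases play out, as a sketch with behavior inferred from the function above:

    system.DefaultPathEnv("linux")   // Linux daemon: the Unix default PATH
    system.DefaultPathEnv("linux")   // Windows daemon with LCOW: also the Unix default
    system.DefaultPathEnv("windows") // Windows daemon, Windows container: "" (the image sets PATH)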
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 000000000..f3762e69d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package system
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
new file mode 100644
index 000000000..aab891522
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package system
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user-provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed, and the path is translated to OS semantics (i.e. / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
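
Illustrative calls (hypothetical paths) matching the examples in the comment above:

    p, err := system.CheckSystemDriveAndRemoveDriveLetter(`C:\ProgramData\docker`)
    // p == `\ProgramData\docker`, err == nil
    _, err = system.CheckSystemDriveAndRemoveDriveLetter(`d:\data`)
    // err != nil: not on the system drive (C:)
    _, err = system.CheckSystemDriveAndRemoveDriveLetter(`C:`)
    // err != nil: a bare drive letter carries no relative path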
diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go
new file mode 100644
index 000000000..26c8b42c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go
@@ -0,0 +1,24 @@
+// +build linux freebsd solaris darwin
+
+package system
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsProcessAlive returns true if the process with the given pid is running.
+func IsProcessAlive(pid int) bool {
+ err := unix.Kill(pid, syscall.Signal(0))
+ if err == nil || err == unix.EPERM {
+ return true
+ }
+
+ return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+ unix.Kill(pid, unix.SIGKILL)
+}
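
A typical pairing of the two helpers; a sketch assuming pid holds a process ID the caller owns:

    if system.IsProcessAlive(pid) {
        system.KillProcess(pid) // sends SIGKILL; errors are not reported
    }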
diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go
new file mode 100644
index 000000000..101b569a5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/rm.go
@@ -0,0 +1,80 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/pkg/errors"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned since it's ok for the dir to
+// be gone; we can just retry the remove operation.
+//
+// This should not return an `os.ErrNotExist` kind of error under any circumstances.
+func EnsureRemoveAll(dir string) error {
+ notExistErr := make(map[string]bool)
+
+ // track retries
+ exitOnErr := make(map[string]int)
+ maxRetry := 5
+
+ // Attempt to unmount anything beneath this dir first
+ mount.RecursiveUnmount(dir)
+
+ for {
+ err := os.RemoveAll(dir)
+ if err == nil {
+ return err
+ }
+
+ pe, ok := err.(*os.PathError)
+ if !ok {
+ return err
+ }
+
+ if os.IsNotExist(err) {
+ if notExistErr[pe.Path] {
+ return err
+ }
+ notExistErr[pe.Path] = true
+
+			// There is a race where some subdir can be removed after the parent
+			// dir entries have been read, so the path could be from
+			// `os.Remove(subdir)`.
+			// If the reported non-existent path is not the passed-in `dir`, we
+			// should just retry; otherwise return with no error.
+ if pe.Path == dir {
+ return nil
+ }
+ continue
+ }
+
+ if pe.Err != syscall.EBUSY {
+ return err
+ }
+
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ if e := mount.Unmount(pe.Path); e != nil {
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ return errors.Wrapf(e, "error while removing %s", dir)
+ }
+ }
+ }
+
+ if exitOnErr[pe.Path] == maxRetry {
+ return err
+ }
+ exitOnErr[pe.Path]++
+ time.Sleep(100 * time.Millisecond)
+ }
+}
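
A usage sketch (hypothetical dir variable; the errors package is the one imported above):

    // dir is a container layer directory that may still have submounts;
    // EnsureRemoveAll unmounts what it can and retries on EBUSY.
    if err := system.EnsureRemoveAll(dir); err != nil {
        return errors.Wrapf(err, "removing %s", dir)
    }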
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
new file mode 100644
index 000000000..715f05b93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 000000000..715f05b93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 000000000..66bf6e28e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,19 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT converts a syscall.Stat_t type to a system.StatT type.
+// This is exposed on Linux as pkg/archive/changes uses it.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
new file mode 100644
index 000000000..b607dea94
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 000000000..b607dea94
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go
new file mode 100644
index 000000000..91c7d121c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns file's device ID (if it's special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
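
A minimal usage sketch (not part of the diff; assumes fmt and log):

    st, err := system.Stat("/etc/hosts")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("size=%d uid=%d gid=%d mode=%#o\n",
        st.Size(), st.UID(), st.GID(), st.Mode())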
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 000000000..6c6397268
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,49 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, size, etc about a file.
+type StatT struct {
+ mode os.FileMode
+ size int64
+ mtim time.Time
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return os.FileMode(s.mode)
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() time.Time {
+ return time.Time(s.mtim)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+ return fromStatT(&fi)
+}
+
+// fromStatT converts an os.FileInfo type to a system.StatT type
+func fromStatT(fi *os.FileInfo) (*StatT, error) {
+ return &StatT{
+ size: (*fi).Size(),
+ mode: (*fi).Mode(),
+ mtim: (*fi).ModTime()}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 000000000..49dbdd378
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "golang.org/x/sys/unix"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+ return unix.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element of the returned slice.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ return []string{commandLine}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 000000000..23e9b207c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,122 @@
+package system
+
+import (
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+ procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+ procGetProductInfo = modkernel32.NewProc("GetProductInfo")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+ OSVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformID uint32
+ CSDVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ Reserve byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+ var err error
+ osv := OSVersion{}
+ osv.Version, err = windows.GetVersion()
+ if err != nil {
+ // GetVersion never fails.
+ panic(err)
+ }
+ osv.MajorVersion = uint8(osv.Version & 0xFF)
+ osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+ osv.Build = uint16(osv.Version >> 16)
+ return osv
+}
+
+// IsWindowsClient returns true if the SKU is client
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsWindowsClient() bool {
+ osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+ r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+ if r1 == 0 {
+ logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+ return false
+ }
+ const verNTWorkstation = 0x00000001
+ return osviex.ProductType == verNTWorkstation
+}
+
+// IsIoTCore returns true if the currently running image is based off of
+// Windows 10 IoT Core.
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsIoTCore() bool {
+ var returnedProductType uint32
+ r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType)))
+ if r1 == 0 {
+ logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err)
+ return false
+ }
+ const productIoTUAP = 0x0000007B
+ const productIoTUAPCommercial = 0x00000083
+ return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. It is a no-op on Windows.
+func Unmount(dest string) error {
+ return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ var argc int32
+
+ argsPtr, err := windows.UTF16PtrFromString(commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ argv, err := windows.CommandLineToArgv(argsPtr, &argc)
+ if err != nil {
+ return nil, err
+ }
+ defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv))))
+
+ newArgs := make([]string, argc)
+ for i, v := range (*argv)[:argc] {
+		newArgs[i] = windows.UTF16ToString((*v)[:])
+ }
+
+ return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+ // For now, check for ntuser API support on the host. In the future, a host
+ // may support win32k in containers even if the host does not support ntuser
+ // APIs.
+ return ntuserApiset.Load() == nil
+}
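
A sketch of how these probes combine on a Windows host (assumes fmt):

    osv := system.GetOSVersion()
    fmt.Printf("Windows %d.%d build %d\n",
        osv.MajorVersion, osv.MinorVersion, osv.Build)
    if !system.HasWin32KSupport() {
        fmt.Println("win32k-dependent containers cannot run here")
    }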
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 000000000..5a10eda5a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+ return unix.Umask(newmask), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 000000000..13f1de176
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the Windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+	// Should not be called on the CLI code path.
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 000000000..6a7752437
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because unix.UtimesNano doesn't support the NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 000000000..edc588a63
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,25 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because unix.UtimesNano doesn't support the NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ atFdCwd := unix.AT_FDCWD
+
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 000000000..139714544
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000..98b111be4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and a nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+ if errno == unix.ENODATA {
+ return nil, nil
+ }
+ if errno == unix.ERANGE {
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ }
+ if errno != nil {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return unix.Lsetxattr(path, attr, data, flags)
+}
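
A round-trip sketch (hypothetical path on an xattr-capable filesystem; user.* attributes need no special privileges on most Linux filesystems):

    if err := system.Lsetxattr(path, "user.checksum", []byte("abc123"), 0); err != nil {
        return err
    }
    val, err := system.Lgetxattr(path, "user.checksum")
    // val == []byte("abc123"); for an unset xattr, both val and err are nil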
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 000000000..0114f2227
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
new file mode 100644
index 000000000..f5262bccf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/ascii.go
@@ -0,0 +1,66 @@
+package term
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ASCII lists the supported ASCII key sequences.
+var ASCII = []string{
+ "ctrl-@",
+ "ctrl-a",
+ "ctrl-b",
+ "ctrl-c",
+ "ctrl-d",
+ "ctrl-e",
+ "ctrl-f",
+ "ctrl-g",
+ "ctrl-h",
+ "ctrl-i",
+ "ctrl-j",
+ "ctrl-k",
+ "ctrl-l",
+ "ctrl-m",
+ "ctrl-n",
+ "ctrl-o",
+ "ctrl-p",
+ "ctrl-q",
+ "ctrl-r",
+ "ctrl-s",
+ "ctrl-t",
+ "ctrl-u",
+ "ctrl-v",
+ "ctrl-w",
+ "ctrl-x",
+ "ctrl-y",
+ "ctrl-z",
+ "ctrl-[",
+ "ctrl-\\",
+ "ctrl-]",
+ "ctrl-^",
+ "ctrl-_",
+}
+
+// ToBytes converts a comma-separated string of key sequences to the corresponding ASCII codes.
+func ToBytes(keys string) ([]byte, error) {
+ codes := []byte{}
+next:
+ for _, key := range strings.Split(keys, ",") {
+ if len(key) != 1 {
+ for code, ctrl := range ASCII {
+ if ctrl == key {
+ codes = append(codes, byte(code))
+ continue next
+ }
+ }
+ if key == "DEL" {
+ codes = append(codes, 127)
+ } else {
+ return nil, fmt.Errorf("Unknown character: '%s'", key)
+ }
+ } else {
+ codes = append(codes, byte(key[0]))
+ }
+ }
+ return codes, nil
+}
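
A worked example: each ctrl key maps to its index in the ASCII table above (ctrl-@ is 0, ctrl-a is 1, and so on):

    codes, err := term.ToBytes("ctrl-p,ctrl-q")
    // err == nil; codes == []byte{16, 17} (the classic Docker detach sequence)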
diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go
new file mode 100644
index 000000000..e648eb812
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/proxy.go
@@ -0,0 +1,74 @@
+package term
+
+import (
+ "io"
+)
+
+// EscapeError is a special error returned by a TTY proxy reader's Read()
+// method when its detach escape sequence is read.
+type EscapeError struct{}
+
+func (EscapeError) Error() string {
+ return "read escape sequence"
+}
+
+// escapeProxy is used only for attaches with a TTY. It is used to proxy
+// stdin keypresses from the underlying reader and look for the passed in
+// escape key sequence to signal a detach.
+type escapeProxy struct {
+ escapeKeys []byte
+ escapeKeyPos int
+ r io.Reader
+}
+
+// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
+// and detects when the specified escape keys are read, in which case the Read
+// method will return an error of type EscapeError.
+func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
+ return &escapeProxy{
+ escapeKeys: escapeKeys,
+ r: r,
+ }
+}
+
+func (r *escapeProxy) Read(buf []byte) (int, error) {
+ nr, err := r.r.Read(buf)
+
+ preserve := func() {
+ // this preserves the original key presses in the passed in buffer
+ nr += r.escapeKeyPos
+ preserve := make([]byte, 0, r.escapeKeyPos+len(buf))
+ preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
+ preserve = append(preserve, buf...)
+ r.escapeKeyPos = 0
+ copy(buf[0:nr], preserve)
+ }
+
+ if nr != 1 || err != nil {
+ if r.escapeKeyPos > 0 {
+ preserve()
+ }
+ return nr, err
+ }
+
+ if buf[0] != r.escapeKeys[r.escapeKeyPos] {
+ if r.escapeKeyPos > 0 {
+ preserve()
+ }
+ return nr, nil
+ }
+
+ if r.escapeKeyPos == len(r.escapeKeys)-1 {
+ return 0, EscapeError{}
+ }
+
+	// Looks like we've got an escape key, but we need to match again on the
+	// next read.
+	// Store the current escape key we found so we can look for the next one on
+	// the next read.
+	// Since this is an escape key, make sure we don't let the caller read it.
+	// If later on we find that this is not the escape sequence, we'll add the
+	// keys back via preserve().
+	r.escapeKeyPos++
+	// Guard against returning a negative count, which would violate the
+	// io.Reader contract; the swallowed bytes are restored by preserve().
+	if nr < r.escapeKeyPos {
+		return 0, nil
+	}
+	return nr - r.escapeKeyPos, nil
+}
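
A consumption sketch (forwardToContainer is a hypothetical sink; assumes os and the ToBytes helper from this package):

    keys, _ := term.ToBytes("ctrl-p,ctrl-q")
    in := term.NewEscapeProxy(os.Stdin, keys)
    buf := make([]byte, 1024)
    for {
        n, err := in.Read(buf)
        if _, ok := err.(term.EscapeError); ok {
            break // the detach sequence was typed; stop proxying
        }
        if err != nil {
            return err
        }
        forwardToContainer(buf[:n])
    }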
diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go
new file mode 100644
index 000000000..6d2dfd3a8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc.go
@@ -0,0 +1,21 @@
+// +build !windows
+// +build !solaris !cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
+ return err
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
new file mode 100644
index 000000000..50234affc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
@@ -0,0 +1,65 @@
+// +build solaris,cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is a passthrough for unix.Termios, to keep it portable across
+// platforms where it is not available or is handled differently.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if err := tcget(fd, &oldState.termios); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+
+ newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY)
+ newState.Oflag &^= unix.OPOST
+ newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+ newState.Cflag &^= (unix.CSIZE | unix.PARENB)
+ newState.Cflag |= unix.CS8
+
+ /*
+		VMIN is the minimum number of characters that need to be read in non-canonical mode for a read to return.
+		Since VMIN is overloaded with another element in canonical mode, it defaults to 4 when we switch modes; it
+		needs to be explicitly set to 1.
+ */
+ newState.Cc[C.VMIN] = 1
+ newState.Cc[C.VTIME] = 0
+
+ if err := tcset(fd, &newState); err != 0 {
+ return nil, err
+ }
+ return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
new file mode 100644
index 000000000..4f59d8d93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term.go
@@ -0,0 +1,124 @@
+// +build !windows
+
+// Package term provides structures and helper functions to work with
+// terminal (state, sizes).
+package term
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // ErrInvalidState is returned if the state of the terminal is invalid.
+ ErrInvalidState = errors.New("Invalid terminal state")
+)
+
+// State represents the state of the terminal.
+type State struct {
+ termios Termios
+}
+
+// Winsize represents the size of the terminal window.
+type Winsize struct {
+ Height uint16
+ Width uint16
+ x uint16
+ y uint16
+}
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+ return os.Stdin, os.Stdout, os.Stderr
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+ var inFd uintptr
+ var isTerminalIn bool
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminalIn = IsTerminal(inFd)
+ }
+ return inFd, isTerminalIn
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios Termios
+ return tcget(fd, &termios) == 0
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+ if state == nil {
+ return ErrInvalidState
+ }
+ if err := tcset(fd, &state.termios); err != 0 {
+ return err
+ }
+ return nil
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+ var oldState State
+ if err := tcget(fd, &oldState.termios); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
+
+// DisableEcho applies the specified state to the terminal connected to the file
+// descriptor, with echo disabled.
+func DisableEcho(fd uintptr, state *State) error {
+ newState := state.termios
+ newState.Lflag &^= unix.ECHO
+
+ if err := tcset(fd, &newState); err != 0 {
+ return err
+ }
+ handleInterrupt(fd, state)
+ return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ oldState, err := MakeRaw(fd)
+ if err != nil {
+ return nil, err
+ }
+ handleInterrupt(fd, oldState)
+ return oldState, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ return nil, nil
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, os.Interrupt)
+ go func() {
+ for range sigchan {
+			// Print a newline so the prompt starts on a fresh line, then restore the terminal and exit.
+ fmt.Println()
+ signal.Stop(sigchan)
+ close(sigchan)
+ RestoreTerminal(fd, state)
+ os.Exit(1)
+ }
+ }()
+}
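
A typical raw-mode round trip built from the helpers above (a sketch; assumes os and log):

    fd, isTerm := term.GetFdInfo(os.Stdin)
    if isTerm {
        state, err := term.SetRawTerminal(fd)
        if err != nil {
            log.Fatal(err)
        }
        defer term.RestoreTerminal(fd, state)
        // ... interactive I/O in raw mode ...
    }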
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
new file mode 100644
index 000000000..c0332c3cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -0,0 +1,237 @@
+// +build windows
+
+package term
+
+import (
+ "io"
+ "os"
+ "os/signal"
+ "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE
+
+ "github.com/Azure/go-ansiterm/winterm"
+ "github.com/docker/docker/pkg/term/windows"
+)
+
+// State holds the console mode for the terminal.
+type State struct {
+ mode uint32
+}
+
+// Winsize is used for window size.
+type Winsize struct {
+ Height uint16
+ Width uint16
+}
+
+const (
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
+ enableVirtualTerminalInput = 0x0200
+ enableVirtualTerminalProcessing = 0x0004
+ disableNewlineAutoReturn = 0x0008
+)
+
+// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
+var vtInputSupported bool
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+ // Turn on VT handling on all std handles, if possible. This might
+ // fail, in which case we will fall back to terminal emulation.
+ var emulateStdin, emulateStdout, emulateStderr bool
+ fd := os.Stdin.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate that enableVirtualTerminalInput is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
+ emulateStdin = true
+ } else {
+ vtInputSupported = true
+ }
+ // Unconditionally set the console mode back even on failure because SetConsoleMode
+ // remembers invalid bits on input handles.
+ winterm.SetConsoleMode(fd, mode)
+ }
+
+ fd = os.Stdout.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate disableNewlineAutoReturn is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ emulateStdout = true
+ } else {
+ winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ }
+ }
+
+ fd = os.Stderr.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate disableNewlineAutoReturn is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ emulateStderr = true
+ } else {
+ winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ }
+ }
+
+ if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" {
+		// The ConEmu and ConsoleZ terminals emulate ANSI well on output
+		// streams, but not on input, so keep emulating stdin.
+ emulateStdin = true
+ emulateStdout = false
+ emulateStderr = false
+ }
+
+ // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
+ // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
+	// go-ansiterm hasn't switched to x/sys/windows.
+ // TODO: switch back to x/sys/windows once go-ansiterm has switched
+ if emulateStdin {
+ stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
+ } else {
+ stdIn = os.Stdin
+ }
+
+ if emulateStdout {
+ stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
+ } else {
+ stdOut = os.Stdout
+ }
+
+ if emulateStderr {
+ stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
+ } else {
+ stdErr = os.Stderr
+ }
+
+ return
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+ return windowsconsole.GetHandleInfo(in)
+}
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ info, err := winterm.GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ winsize := &Winsize{
+ Width: uint16(info.Window.Right - info.Window.Left + 1),
+ Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+ }
+
+ return winsize, nil
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ return windowsconsole.IsConsole(fd)
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+ return winterm.SetConsoleMode(fd, state.mode)
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+ mode, e := winterm.GetConsoleMode(fd)
+ if e != nil {
+ return nil, e
+ }
+
+ return &State{mode: mode}, nil
+}
+
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+func DisableEcho(fd uintptr, state *State) error {
+ mode := state.mode
+ mode &^= winterm.ENABLE_ECHO_INPUT
+ mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
+ err := winterm.SetConsoleMode(fd, mode)
+ if err != nil {
+ return err
+ }
+
+ // Register an interrupt handler to catch and restore prior state
+ restoreAtInterrupt(fd, state)
+ return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ state, err := MakeRaw(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Register an interrupt handler to catch and restore prior state
+ restoreAtInterrupt(fd, state)
+ return state, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ignore failures, since disableNewlineAutoReturn might not be supported on this
+ // version of Windows.
+ winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
+ return state, err
+}
+
+// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ mode := state.mode
+
+ // See
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+ // Disable these modes
+ mode &^= winterm.ENABLE_ECHO_INPUT
+ mode &^= winterm.ENABLE_LINE_INPUT
+ mode &^= winterm.ENABLE_MOUSE_INPUT
+ mode &^= winterm.ENABLE_WINDOW_INPUT
+ mode &^= winterm.ENABLE_PROCESSED_INPUT
+
+ // Enable these modes
+ mode |= winterm.ENABLE_EXTENDED_FLAGS
+ mode |= winterm.ENABLE_INSERT_MODE
+ mode |= winterm.ENABLE_QUICK_EDIT_MODE
+ if vtInputSupported {
+ mode |= enableVirtualTerminalInput
+ }
+
+ err = winterm.SetConsoleMode(fd, mode)
+ if err != nil {
+ return nil, err
+ }
+ return state, nil
+}
+
+func restoreAtInterrupt(fd uintptr, state *State) {
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, os.Interrupt)
+
+ go func() {
+		<-sigchan
+ RestoreTerminal(fd, state)
+ os.Exit(0)
+ }()
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
new file mode 100644
index 000000000..c47341e87
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
@@ -0,0 +1,42 @@
+// +build darwin freebsd openbsd
+
+package term
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ getTermios = unix.TIOCGETA
+ setTermios = unix.TIOCSETA
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+ newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+ newState.Oflag &^= unix.OPOST
+ newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+ newState.Cflag &^= (unix.CSIZE | unix.PARENB)
+ newState.Cflag |= unix.CS8
+ newState.Cc[unix.VMIN] = 1
+ newState.Cc[unix.VTIME] = 0
+
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
new file mode 100644
index 000000000..3e25eb7a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
@@ -0,0 +1,37 @@
+package term
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ getTermios = unix.TCGETS
+ setTermios = unix.TCSETS
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ termios, err := unix.IoctlGetTermios(int(fd), getTermios)
+ if err != nil {
+ return nil, err
+ }
+
+ var oldState State
+ oldState.termios = Termios(*termios)
+
+ termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+ termios.Oflag &^= unix.OPOST
+ termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+ termios.Cflag &^= (unix.CSIZE | unix.PARENB)
+ termios.Cflag |= unix.CS8
+
+ if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
+ return nil, err
+ }
+ return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
new file mode 100644
index 000000000..29d396318
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
@@ -0,0 +1,263 @@
+// +build windows
+
+package windowsconsole
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "unsafe"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+const (
+ escapeSequence = ansiterm.KEY_ESC_CSI
+)
+
+// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
+type ansiReader struct {
+ file *os.File
+ fd uintptr
+ buffer []byte
+ cbBuffer int
+ command []byte
+}
+
+// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
+// Windows console input handle.
+func NewAnsiReader(nFile int) io.ReadCloser {
+ initLogger()
+ file, fd := winterm.GetStdFile(nFile)
+ return &ansiReader{
+ file: file,
+ fd: fd,
+ command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+ buffer: make([]byte, 0),
+ }
+}
+
+// Close closes the wrapped file.
+func (ar *ansiReader) Close() (err error) {
+ return ar.file.Close()
+}
+
+// Fd returns the file descriptor of the wrapped file.
+func (ar *ansiReader) Fd() uintptr {
+ return ar.fd
+}
+
+// Read reads up to len(p) bytes of translated input events into p.
+func (ar *ansiReader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ // Previously read bytes exist, read as much as we can and return
+ if len(ar.buffer) > 0 {
+ logger.Debugf("Reading previously cached bytes")
+
+ originalLength := len(ar.buffer)
+ copiedLength := copy(p, ar.buffer)
+
+ if copiedLength == originalLength {
+ ar.buffer = make([]byte, 0, len(p))
+ } else {
+ ar.buffer = ar.buffer[copiedLength:]
+ }
+
+ logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
+ return copiedLength, nil
+ }
+
+ // Read and translate key events
+ events, err := readInputEvents(ar.fd, len(p))
+ if err != nil {
+ return 0, err
+ } else if len(events) == 0 {
+ logger.Debug("No input events detected")
+ return 0, nil
+ }
+
+ keyBytes := translateKeyEvents(events, []byte(escapeSequence))
+
+ // Save excess bytes and right-size keyBytes
+ if len(keyBytes) > len(p) {
+ logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
+ ar.buffer = keyBytes[len(p):]
+ keyBytes = keyBytes[:len(p)]
+ } else if len(keyBytes) == 0 {
+ logger.Debug("No key bytes returned from the translator")
+ return 0, nil
+ }
+
+ copiedLength := copy(p, keyBytes)
+ if copiedLength != len(keyBytes) {
+ return 0, errors.New("unexpected copy length encountered")
+ }
+
+ logger.Debugf("Read p[%d]: % x", copiedLength, p)
+ logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
+ return copiedLength, nil
+}
+
+// readInputEvents polls until at least one event is available.
+func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
+ // Determine the maximum number of records to retrieve
+ // -- Cast around the type system to obtain the size of a single INPUT_RECORD.
+ // unsafe.Sizeof requires an expression vs. a type-reference; the casting
+ // tricks the type system into believing it has such an expression.
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
+ countRecords := maxBytes / recordSize
+ if countRecords > ansiterm.MAX_INPUT_EVENTS {
+ countRecords = ansiterm.MAX_INPUT_EVENTS
+ } else if countRecords == 0 {
+ countRecords = 1
+ }
+ logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
+
+ // Wait for and read input events
+ events := make([]winterm.INPUT_RECORD, countRecords)
+ nEvents := uint32(0)
+ eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
+ if err != nil {
+ return nil, err
+ }
+
+ if eventsExist {
+ err = winterm.ReadConsoleInput(fd, events, &nEvents)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Return a slice restricted to the number of returned records
+ logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
+ return events[:nEvents], nil
+}
+
+// KeyEvent Translation Helpers
+
+var arrowKeyMapPrefix = map[uint16]string{
+ winterm.VK_UP: "%s%sA",
+ winterm.VK_DOWN: "%s%sB",
+ winterm.VK_RIGHT: "%s%sC",
+ winterm.VK_LEFT: "%s%sD",
+}
+
+var keyMapPrefix = map[uint16]string{
+ winterm.VK_UP: "\x1B[%sA",
+ winterm.VK_DOWN: "\x1B[%sB",
+ winterm.VK_RIGHT: "\x1B[%sC",
+ winterm.VK_LEFT: "\x1B[%sD",
+ winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1
+ winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4
+ winterm.VK_INSERT: "\x1B[2%s~",
+ winterm.VK_DELETE: "\x1B[3%s~",
+ winterm.VK_PRIOR: "\x1B[5%s~",
+ winterm.VK_NEXT: "\x1B[6%s~",
+ winterm.VK_F1: "",
+ winterm.VK_F2: "",
+ winterm.VK_F3: "\x1B[13%s~",
+ winterm.VK_F4: "\x1B[14%s~",
+ winterm.VK_F5: "\x1B[15%s~",
+ winterm.VK_F6: "\x1B[17%s~",
+ winterm.VK_F7: "\x1B[18%s~",
+ winterm.VK_F8: "\x1B[19%s~",
+ winterm.VK_F9: "\x1B[20%s~",
+ winterm.VK_F10: "\x1B[21%s~",
+ winterm.VK_F11: "\x1B[23%s~",
+ winterm.VK_F12: "\x1B[24%s~",
+}
+
+// translateKeyEvents converts the input events into the appropriate ANSI string.
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+ var buffer bytes.Buffer
+ for _, event := range events {
+ if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+ buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+ }
+ }
+
+ return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+ if keyEvent.UnicodeChar == 0 {
+ return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+ }
+
+ _, alt, control := getControlKeys(keyEvent.ControlKeyState)
+ if control {
+ // TODO(azlinux): Implement the following control sequences
+ // <Ctrl>-D Signals the end of input from the keyboard; also exits the current shell.
+ // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key.
+ // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-S.
+ // <Ctrl>-S Suspends printing on the screen (does not stop the program).
+ // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
+ // <Ctrl>-\ Quits the current command and creates a core dump.
+
+ }
+
+ // <Alt>+Key generates ESC N Key
+ if !control && alt {
+ return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+ }
+
+ return string(keyEvent.UnicodeChar)
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
+ shift, alt, control := getControlKeys(controlState)
+ modifier := getControlKeysModifier(shift, alt, control)
+
+ if format, ok := arrowKeyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, escapeSequence, modifier)
+ }
+
+ if format, ok := keyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, modifier)
+ }
+
+ return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+func getControlKeys(controlState uint32) (shift, alt, control bool) {
+ shift = 0 != (controlState & winterm.SHIFT_PRESSED)
+ alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
+ control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
+ return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+func getControlKeysModifier(shift, alt, control bool) string {
+ if shift && alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_8
+ }
+ if alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_7
+ }
+ if shift && control {
+ return ansiterm.KEY_CONTROL_PARAM_6
+ }
+ if control {
+ return ansiterm.KEY_CONTROL_PARAM_5
+ }
+ if shift && alt {
+ return ansiterm.KEY_CONTROL_PARAM_4
+ }
+ if alt {
+ return ansiterm.KEY_CONTROL_PARAM_3
+ }
+ if shift {
+ return ansiterm.KEY_CONTROL_PARAM_2
+ }
+ return ""
+}
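
The helpers above compose xterm-style escape sequences: formatVirtualKey picks a format string from the key maps and splices in the modifier parameter from getControlKeysModifier. A minimal, platform-neutral sketch of that composition, assuming the ansiterm KEY_CONTROL_PARAM_* constants hold the conventional xterm modifier strings ";2" through ";8" and that KEY_ESC_CSI is "\x1b[":

package main

import "fmt"

// modifier mirrors getControlKeysModifier with the assumed ";N" values.
func modifier(shift, alt, control bool) string {
	switch {
	case shift && alt && control:
		return ";8"
	case alt && control:
		return ";7"
	case shift && control:
		return ";6"
	case control:
		return ";5"
	case shift && alt:
		return ";4"
	case alt:
		return ";3"
	case shift:
		return ";2"
	}
	return ""
}

func main() {
	escapeSequence := "\x1b[" // CSI, the default escape prefix for arrow keys

	// Shift+Up uses the arrow-key template "%s%sA".
	fmt.Printf("%q\n", fmt.Sprintf("%s%sA", escapeSequence, modifier(true, false, false)))

	// F5 with no modifiers uses the fixed template "\x1B[15%s~".
	fmt.Printf("%q\n", fmt.Sprintf("\x1b[15%s~", modifier(false, false, false)))
}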
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
new file mode 100644
index 000000000..256577e1f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package windowsconsole
+
+import (
+ "io"
+ "os"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
+type ansiWriter struct {
+ file *os.File
+ fd uintptr
+ infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO
+ command []byte
+ escapeSequence []byte
+ inAnsiSequence bool
+ parser *ansiterm.AnsiParser
+}
+
+// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
+// Windows console output handle.
+func NewAnsiWriter(nFile int) io.Writer {
+ initLogger()
+ file, fd := winterm.GetStdFile(nFile)
+ info, err := winterm.GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil
+ }
+
+ parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
+ logger.Infof("newAnsiWriter: parser %p", parser)
+
+ aw := &ansiWriter{
+ file: file,
+ fd: fd,
+ infoReset: info,
+ command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+ escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
+ parser: parser,
+ }
+
+ logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
+ logger.Infof("newAnsiWriter: %v", aw)
+ return aw
+}
+
+func (aw *ansiWriter) Fd() uintptr {
+ return aw.fd
+}
+
+// Write writes len(p) bytes from p to the underlying data stream.
+func (aw *ansiWriter) Write(p []byte) (total int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ logger.Infof("Write: % x", p)
+ logger.Infof("Write: %s", string(p))
+ return aw.parser.Parse(p)
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go
new file mode 100644
index 000000000..4bad32ea7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package windowsconsole
+
+import (
+ "os"
+
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console.
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+ switch t := in.(type) {
+ case *ansiReader:
+ return t.Fd(), true
+ case *ansiWriter:
+ return t.Fd(), true
+ }
+
+ var inFd uintptr
+ var isTerminal bool
+
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminal = IsConsole(inFd)
+ }
+ return inFd, isTerminal
+}
+
+// IsConsole returns true if the given file descriptor is a Windows Console.
+// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+ _, e := winterm.GetConsoleMode(fd)
+ return e == nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
new file mode 100644
index 000000000..c02a93a03
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
@@ -0,0 +1,33 @@
+// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to/from Windows Console API calls.
+
+package windowsconsole
+
+import (
+ "io/ioutil"
+ "os"
+ "sync"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+var initOnce sync.Once
+
+func initLogger() {
+ initOnce.Do(func() {
+ logFile := ioutil.Discard
+
+ if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+ logFile, _ = os.Create("ansiReaderWriter.log")
+ }
+
+ logger = &logrus.Logger{
+ Out: logFile,
+ Formatter: new(logrus.TextFormatter),
+ Level: logrus.DebugLevel,
+ }
+ })
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go
new file mode 100644
index 000000000..f58367fe6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/winsize.go
@@ -0,0 +1,30 @@
+// +build !solaris,!windows
+
+package term
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ ws := &Winsize{}
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+ // Skip errno = 0
+ if err == 0 {
+ return ws, nil
+ }
+ return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+ // Skip errno = 0
+ if err == 0 {
+ return nil
+ }
+ return err
+}
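
A usage sketch for the two calls above, assuming the Winsize struct defined elsewhere in pkg/term exposes Height and Width uint16 fields and that the package is imported from its vendored path:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// TIOCGWINSZ against stdout: fails if stdout is not a terminal.
	ws, err := term.GetWinsize(os.Stdout.Fd())
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a terminal:", err)
		return
	}
	fmt.Printf("%d rows x %d cols\n", ws.Height, ws.Width)

	// Writing the same size back is a no-op resize via TIOCSWINSZ.
	if err := term.SetWinsize(os.Stdout.Fd(), ws); err != nil {
		fmt.Fprintln(os.Stderr, "resize failed:", err)
	}
}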
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go
new file mode 100644
index 000000000..39c1d3207
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go
@@ -0,0 +1,42 @@
+// +build solaris,cgo
+
+package term
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+/*
+#include <unistd.h>
+#include <stropts.h>
+#include <termios.h>
+
+// Small wrapper to get rid of variadic args of ioctl()
+int my_ioctl(int fd, int cmd, struct winsize *ws) {
+ return ioctl(fd, cmd, ws);
+}
+*/
+import "C"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ ws := &Winsize{}
+ ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+ // Skip retval = 0
+ if ret == 0 {
+ return ws, nil
+ }
+ return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+ // Skip retval = 0
+ if ret == 0 {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
new file mode 100644
index 000000000..e4dec3a5d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
@@ -0,0 +1,11 @@
+// +build go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func Clone(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
new file mode 100644
index 000000000..0d5b448fe
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
@@ -0,0 +1,33 @@
+// +build go1.7,!go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func Clone(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
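
The field-by-field copy exists because a tls.Config must not be modified after it has been handed to a TLS function; callers clone first and then customize. A small sketch of the intended pattern, using the stdlib Clone that the go1.8 variant above delegates to:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	base := &tls.Config{MinVersion: tls.VersionTLS12}

	// Derive a per-connection config without mutating the shared base.
	derived := base.Clone() // tlsconfig.Clone(base) on go1.7
	derived.ServerName = "example.com"

	fmt.Println(base.ServerName)    // "" -- base is untouched
	fmt.Println(derived.ServerName) // "example.com"
}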
diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
new file mode 100644
index 000000000..74776e65e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
@@ -0,0 +1,139 @@
+// Package truncindex provides a general 'index tree', used by Docker
+// to reference containers by a few unambiguous characters of their ID.
+package truncindex
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/tchap/go-patricia/patricia"
+)
+
+var (
+ // ErrEmptyPrefix is an error returned if the prefix was empty.
+ ErrEmptyPrefix = errors.New("Prefix can't be empty")
+
+ // ErrIllegalChar is returned when a space is in the ID
+ ErrIllegalChar = errors.New("illegal character: ' '")
+
+ // ErrNotExist is returned when an ID or its prefix is not found in the index.
+ ErrNotExist = errors.New("ID does not exist")
+)
+
+// ErrAmbiguousPrefix is returned if the prefix was ambiguous
+// (multiple ids for the prefix).
+type ErrAmbiguousPrefix struct {
+ prefix string
+}
+
+func (e ErrAmbiguousPrefix) Error() string {
+ return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+ sync.RWMutex
+ trie *patricia.Trie
+ ids map[string]struct{}
+}
+
+// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+ idx = &TruncIndex{
+ ids: make(map[string]struct{}),
+
+ // Change the patricia max prefix per node length,
+ // because our IDs are always 64 characters long.
+ trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+ }
+ for _, id := range ids {
+ idx.addID(id)
+ }
+ return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+ if strings.Contains(id, " ") {
+ return ErrIllegalChar
+ }
+ if id == "" {
+ return ErrEmptyPrefix
+ }
+ if _, exists := idx.ids[id]; exists {
+ return fmt.Errorf("id already exists: '%s'", id)
+ }
+ idx.ids[id] = struct{}{}
+ if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+ return fmt.Errorf("failed to insert id: %s", id)
+ }
+ return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ return idx.addID(id)
+}
+
+// Delete removes an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Delete(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ if _, exists := idx.ids[id]; !exists || id == "" {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ delete(idx.ids, id)
+ if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Get(s string) (string, error) {
+ if s == "" {
+ return "", ErrEmptyPrefix
+ }
+ var (
+ id string
+ )
+ subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+ if id != "" {
+ // the prefix is ambiguous if it matches two or more IDs
+ id = ""
+ return ErrAmbiguousPrefix{prefix: string(prefix)}
+ }
+ id = string(prefix)
+ return nil
+ }
+
+ idx.RLock()
+ defer idx.RUnlock()
+ if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+ return "", err
+ }
+ if id != "" {
+ return id, nil
+ }
+ return "", ErrNotExist
+}
+
+// Iterate iterates over all stored IDs and passes each of them to the given
+// handler. Take care that the handler does not call any public method on the
+// TruncIndex, as the internal locking is not reentrant/recursive and doing so
+// would result in deadlock.
+func (idx *TruncIndex) Iterate(handler func(id string)) {
+ idx.Lock()
+ defer idx.Unlock()
+ idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+ handler(string(prefix))
+ return nil
+ })
+}
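
A usage sketch against the API above (the import path assumes this vendored copy of the package):

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	idA := "d3b0" + strings.Repeat("a", 60) // 64-character IDs
	idB := "d3b1" + strings.Repeat("b", 60)
	idx := truncindex.NewTruncIndex([]string{idA, idB})

	// A unique prefix resolves to the full ID.
	full, err := idx.Get("d3b0")
	fmt.Println(full == idA, err) // true <nil>

	// A prefix shared by both IDs is ambiguous.
	_, err = idx.Get("d3b")
	fmt.Println(err) // Multiple IDs found with provided prefix: ...
}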
diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf
new file mode 100644
index 000000000..7608b0e33
--- /dev/null
+++ b/vendor/github.com/docker/docker/vendor.conf
@@ -0,0 +1,147 @@
+# the following lines are in sorted order, FYI
+github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
+github.com/Microsoft/hcsshim v0.6.2
+github.com/Microsoft/go-winio v0.4.4
+github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
+github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
+github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
+github.com/gorilla/context v1.1
+github.com/gorilla/mux v1.1
+github.com/jhowardmsft/opengcs v0.0.12
+github.com/kr/pty 5cf931ef8f
+github.com/mattn/go-shellwords v1.0.3
+github.com/sirupsen/logrus v1.0.1
+github.com/tchap/go-patricia v2.2.6
+github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
+golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
+github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
+github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
+github.com/pmezard/go-difflib v1.0.0
+
+github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
+github.com/imdario/mergo 0.2.1
+golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
+
+#get libnetwork packages
+github.com/docker/libnetwork 248fd5ea6a67f8810da322e6e7441e8de96a9045 https://github.com/dmcgowan/libnetwork.git
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
+github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
+github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
+github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
+github.com/hashicorp/memberlist v0.1.0
+github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
+github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
+github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
+github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
+github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
+github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
+github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
+github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
+github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
+github.com/coreos/etcd v3.2.1
+github.com/coreos/go-semver v0.2.0
+github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
+github.com/hashicorp/consul v0.5.2
+github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
+github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
+
+# get graph and distribution packages
+github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
+github.com/vbatts/tar-split v0.10.1
+github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
+
+# get go-zfs packages
+github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
+github.com/pborman/uuid v1.0
+
+google.golang.org/grpc v1.3.0
+
+# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
+github.com/opencontainers/runc e9325d442f5979c4f79bfa9e09bdf7abb74ba03b https://github.com/dmcgowan/runc.git
+github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
+github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs
+
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
+
+# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
+github.com/coreos/go-systemd v4
+github.com/godbus/dbus v4.0.0
+github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
+github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
+
+# gelf logging driver deps
+github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea
+
+github.com/fluent/fluent-logger-golang v1.2.1
+# fluent-logger-golang deps
+github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
+github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
+
+# fsnotify
+github.com/fsnotify/fsnotify v1.4.2
+
+# awslogs deps
+github.com/aws/aws-sdk-go v1.4.22
+github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
+github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
+
+# logentries
+github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
+
+# gcplogs deps
+golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
+google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
+cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
+github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
+google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
+
+# containerd
+github.com/containerd/containerd fc10004571bb9b26695ccbf2dd4a83213f60b93e https://github.com/dmcgowan/containerd.git
+github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
+github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
+github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
+
+# cluster
+github.com/docker/swarmkit 8bdecc57887ffc598b63d6433f58e0d2852112c3 https://github.com/dmcgowan/swarmkit.git
+github.com/gogo/protobuf v0.4
+github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
+github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
+golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
+golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
+github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
+github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
+github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
+github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
+github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/matttproud/golang_protobuf_extensions v1.0.0
+github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
+github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+
+# cli
+github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
+github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
+
+# metrics
+github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
+
+github.com/opencontainers/selinux v1.0.0-rc1
+
+# archive/tar
+# mkdir -p ./vendor/archive
+# git clone git://github.com/tonistiigi/go-1.git ./go
+# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore
+# cp -a go/src/archive/tar ./vendor/archive/tar
+# rm -rf ./go
+# vndr
diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE
new file mode 100644
index 000000000..b55b37bc3
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md
new file mode 100644
index 000000000..d257e44fd
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/README.md
@@ -0,0 +1,13 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections)
+
+# Introduction
+
+go-connections provides common packages for working with network connections.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation.
+
+## License
+
+go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
new file mode 100644
index 000000000..4d5f5ae63
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/nat.go
@@ -0,0 +1,242 @@
+// Package nat is a convenience package for manipulation of strings describing network ports.
+package nat
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+)
+
+const (
+ // portSpecTemplate is the expected format for port specifications
+ portSpecTemplate = "ip:hostPort:containerPort"
+)
+
+// PortBinding represents a binding between a Host IP address and a Host Port
+type PortBinding struct {
+ // HostIP is the host IP Address
+ HostIP string `json:"HostIp"`
+ // HostPort is the host port number
+ HostPort string
+}
+
+// PortMap is a collection of PortBinding indexed by Port
+type PortMap map[Port][]PortBinding
+
+// PortSet is a collection of structs indexed by Port
+type PortSet map[Port]struct{}
+
+// Port is a string containing port number and protocol in the format "80/tcp"
+type Port string
+
+// NewPort creates a new instance of a Port given a protocol and port number or port range
+func NewPort(proto, port string) (Port, error) {
+ // Check for parsing issues on "port" now so we can avoid having
+ // to check it later on.
+
+ portStartInt, portEndInt, err := ParsePortRangeToInt(port)
+ if err != nil {
+ return "", err
+ }
+
+ if portStartInt == portEndInt {
+ return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
+ }
+ return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
+}
+
+// ParsePort parses the port number string and returns an int
+func ParsePort(rawPort string) (int, error) {
+ if len(rawPort) == 0 {
+ return 0, nil
+ }
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// ParsePortRangeToInt parses the port range string and returns start/end ints
+func ParsePortRangeToInt(rawPort string) (int, int, error) {
+ if len(rawPort) == 0 {
+ return 0, 0, nil
+ }
+ start, end, err := ParsePortRange(rawPort)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(start), int(end), nil
+}
+
+// Proto returns the protocol of a Port
+func (p Port) Proto() string {
+ proto, _ := SplitProtoPort(string(p))
+ return proto
+}
+
+// Port returns the port number of a Port
+func (p Port) Port() string {
+ _, port := SplitProtoPort(string(p))
+ return port
+}
+
+// Int returns the port number of a Port as an int
+func (p Port) Int() int {
+ portStr := p.Port()
+ // We don't need to check for an error because we're going to
+ // assume that any error would have been found, and reported, in NewPort()
+ port, _ := ParsePort(portStr)
+ return port
+}
+
+// Range returns the start/end port numbers of a Port range as ints
+func (p Port) Range() (int, int, error) {
+ return ParsePortRangeToInt(p.Port())
+}
+
+// SplitProtoPort splits a port in the format of proto/port
+func SplitProtoPort(rawPort string) (string, string) {
+ parts := strings.Split(rawPort, "/")
+ l := len(parts)
+ if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
+ return "", ""
+ }
+ if l == 1 {
+ return "tcp", rawPort
+ }
+ if len(parts[1]) == 0 {
+ return "tcp", parts[0]
+ }
+ return parts[1], parts[0]
+}
+
+func validateProto(proto string) bool {
+ for _, availableProto := range []string{"tcp", "udp"} {
+ if availableProto == proto {
+ return true
+ }
+ }
+ return false
+}
+
+// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
+// these into the internal types
+func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
+ var (
+ exposedPorts = make(map[Port]struct{}, len(ports))
+ bindings = make(map[Port][]PortBinding)
+ )
+ for _, rawPort := range ports {
+ portMappings, err := ParsePortSpec(rawPort)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, portMapping := range portMappings {
+ port := portMapping.Port
+ if _, exists := exposedPorts[port]; !exists {
+ exposedPorts[port] = struct{}{}
+ }
+ bslice, exists := bindings[port]
+ if !exists {
+ bslice = []PortBinding{}
+ }
+ bindings[port] = append(bslice, portMapping.Binding)
+ }
+ }
+ return exposedPorts, bindings, nil
+}
+
+// PortMapping is a data object mapping a Port to a PortBinding
+type PortMapping struct {
+ Port Port
+ Binding PortBinding
+}
+
+func splitParts(rawport string) (string, string, string) {
+ parts := strings.Split(rawport, ":")
+ n := len(parts)
+ containerport := parts[n-1]
+
+ switch n {
+ case 1:
+ return "", "", containerport
+ case 2:
+ return "", parts[0], containerport
+ case 3:
+ return parts[0], parts[1], containerport
+ default:
+ return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
+ }
+}
+
+// ParsePortSpec parses a port specification string into a slice of PortMappings
+func ParsePortSpec(rawPort string) ([]PortMapping, error) {
+ var proto string
+ rawIP, hostPort, containerPort := splitParts(rawPort)
+ proto, containerPort = SplitProtoPort(containerPort)
+
+ // Strip [] from IPV6 addresses
+ ip, _, err := net.SplitHostPort(rawIP + ":")
+ if err != nil {
+ return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
+ }
+ if ip != "" && net.ParseIP(ip) == nil {
+ return nil, fmt.Errorf("Invalid ip address: %s", ip)
+ }
+ if containerPort == "" {
+ return nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
+ }
+
+ startPort, endPort, err := ParsePortRange(containerPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+ }
+
+ var startHostPort, endHostPort uint64 = 0, 0
+ if len(hostPort) > 0 {
+ startHostPort, endHostPort, err = ParsePortRange(hostPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
+ }
+ }
+
+ if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
+ // Allow host port range iff containerPort is not a range.
+ // In this case, use the host port range as the dynamic
+ // host port range to allocate into.
+ if endPort != startPort {
+ return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
+ }
+ }
+
+ if !validateProto(strings.ToLower(proto)) {
+ return nil, fmt.Errorf("Invalid proto: %s", proto)
+ }
+
+ ports := []PortMapping{}
+ for i := uint64(0); i <= (endPort - startPort); i++ {
+ containerPort = strconv.FormatUint(startPort+i, 10)
+ if len(hostPort) > 0 {
+ hostPort = strconv.FormatUint(startHostPort+i, 10)
+ }
+ // Set hostPort to a range only if there is a single container port
+ // and a dynamic host port.
+ if startPort == endPort && startHostPort != endHostPort {
+ hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
+ }
+ port, err := NewPort(strings.ToLower(proto), containerPort)
+ if err != nil {
+ return nil, err
+ }
+
+ binding := PortBinding{
+ HostIP: ip,
+ HostPort: hostPort,
+ }
+ ports = append(ports, PortMapping{Port: port, Binding: binding})
+ }
+ return ports, nil
+}
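
A usage sketch for the parsing entry points above (the import path assumes this vendored copy of go-connections):

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Full ip:hostPort:containerPort spec with an explicit protocol.
	mappings, err := nat.ParsePortSpec("127.0.0.1:8080:80/tcp")
	if err != nil {
		panic(err)
	}
	for _, m := range mappings {
		fmt.Printf("%s -> %s:%s\n", m.Port, m.Binding.HostIP, m.Binding.HostPort)
		// 80/tcp -> 127.0.0.1:8080
	}

	// A container port range expands to one mapping per port.
	mappings, _ = nat.ParsePortSpec("8000-8002:6000-6002")
	fmt.Println(len(mappings)) // 3
}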
diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
new file mode 100644
index 000000000..892adf8c6
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/parse.go
@@ -0,0 +1,57 @@
+package nat
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// PartParser parses and validates the specified string (data) using the specified template
+// e.g. ip:public:private -> 192.168.0.1:80:8000
+// DEPRECATED: do not use; this function may be removed in a future version
+func PartParser(template, data string) (map[string]string, error) {
+ // ip:public:private
+ var (
+ templateParts = strings.Split(template, ":")
+ parts = strings.Split(data, ":")
+ out = make(map[string]string, len(templateParts))
+ )
+ if len(parts) != len(templateParts) {
+ return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
+ }
+
+ for i, t := range templateParts {
+ value := ""
+ if len(parts) > i {
+ value = parts[i]
+ }
+ out[t] = value
+ }
+ return out, nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+ if ports == "" {
+ return 0, 0, fmt.Errorf("Empty string specified for ports.")
+ }
+ if !strings.Contains(ports, "-") {
+ start, err := strconv.ParseUint(ports, 10, 16)
+ end := start
+ return start, end, err
+ }
+
+ parts := strings.Split(ports, "-")
+ start, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ end, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ if end < start {
+ return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+ }
+ return start, end, nil
+}
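
For instance, a short sketch of ParsePortRange's accepted inputs:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	start, end, err := nat.ParsePortRange("8000-9000")
	fmt.Println(start, end, err) // 8000 9000 <nil>

	// A single port is treated as a degenerate range.
	start, end, _ = nat.ParsePortRange("80")
	fmt.Println(start, end) // 80 80

	// A reversed range is rejected.
	_, _, err = nat.ParsePortRange("9000-8000")
	fmt.Println(err) // Invalid range specified for the Port: 9000-8000
}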
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 000000000..ce950171e
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+ "sort"
+ "strings"
+)
+
+type portSorter struct {
+ ports []Port
+ by func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+ return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+ s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+ ip := s.ports[i]
+ jp := s.ports[j]
+
+ return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate
+// This function should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+ s := &portSorter{ports, predicate}
+ sort.Sort(s)
+}
+
+type portMapEntry struct {
+ port Port
+ binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the ports so that the order is:
+// 1. ports with a larger explicit host-port binding
+// 2. a larger container port
+// 3. ports using the tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+ pi, pj := s[i].port, s[j].port
+ hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+ return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mapping. The ports
+// with an explicit HostPort will be placed first.
+func SortPortMap(ports []Port, bindings PortMap) {
+ s := portMapSorter{}
+ for _, p := range ports {
+ if binding, ok := bindings[p]; ok {
+ for _, b := range binding {
+ s = append(s, portMapEntry{port: p, binding: b})
+ }
+ bindings[p] = []PortBinding{}
+ } else {
+ s = append(s, portMapEntry{port: p})
+ }
+ }
+
+ sort.Sort(s)
+ var (
+ i int
+ pm = make(map[Port]struct{})
+ )
+ // reorder ports
+ for _, entry := range s {
+ if _, ok := pm[entry.port]; !ok {
+ ports[i] = entry.port
+ pm[entry.port] = struct{}{}
+ i++
+ }
+ // reorder bindings for this port
+ if _, ok := bindings[entry.port]; ok {
+ bindings[entry.port] = append(bindings[entry.port], entry.binding)
+ }
+ }
+}
+
+func toInt(s string) uint64 {
+ i, _, err := ParsePortRange(s)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
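
A sketch of the resulting order, using the types defined in this package; the concrete ordering below follows from the Less implementation above:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	ports := []nat.Port{"80/tcp", "22/tcp", "6379/tcp"}
	bindings := nat.PortMap{
		"22/tcp":   {{HostPort: "2222"}}, // explicit host port
		"6379/tcp": {{}},                 // dynamic host port
	}

	nat.SortPortMap(ports, bindings)

	// 22/tcp sorts first (explicit host port), then 6379/tcp beats 80/tcp
	// on the larger container port number.
	fmt.Println(ports) // [22/tcp 6379/tcp 80/tcp]
}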
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/README.md
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
new file mode 100644
index 000000000..99846ffdd
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
@@ -0,0 +1,81 @@
+package sockets
+
+import (
+ "errors"
+ "net"
+ "sync"
+)
+
+var errClosed = errors.New("use of closed network connection")
+
+// InmemSocket implements net.Listener using in-memory only connections.
+type InmemSocket struct {
+ chConn chan net.Conn
+ chClose chan struct{}
+ addr string
+ mu sync.Mutex
+}
+
+// dummyAddr is used to satisfy net.Addr for the in-mem socket;
+// it is just stored as a string and returns the string for all calls.
+type dummyAddr string
+
+// NewInmemSocket creates an in-memory only net.Listener
+// The addr argument can be any string, but is used to satisfy the `Addr()` part
+// of the net.Listener interface
+func NewInmemSocket(addr string, bufSize int) *InmemSocket {
+ return &InmemSocket{
+ chConn: make(chan net.Conn, bufSize),
+ chClose: make(chan struct{}),
+ addr: addr,
+ }
+}
+
+// Addr returns the socket's addr string to satisfy net.Listener
+func (s *InmemSocket) Addr() net.Addr {
+ return dummyAddr(s.addr)
+}
+
+// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
+func (s *InmemSocket) Accept() (net.Conn, error) {
+ select {
+ case conn := <-s.chConn:
+ return conn, nil
+ case <-s.chClose:
+ return nil, errClosed
+ }
+}
+
+// Close closes the listener. It will be unavailable for use once closed.
+func (s *InmemSocket) Close() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ select {
+ case <-s.chClose:
+ default:
+ close(s.chClose)
+ }
+ return nil
+}
+
+// Dial is used to establish a connection with the in-mem server
+func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
+ srvConn, clientConn := net.Pipe()
+ select {
+ case s.chConn <- srvConn:
+ case <-s.chClose:
+ return nil, errClosed
+ }
+
+ return clientConn, nil
+}
+
+// Network returns the addr string, satisfies net.Addr
+func (a dummyAddr) Network() string {
+ return string(a)
+}
+
+// String returns the string form
+func (a dummyAddr) String() string {
+ return string(a)
+}
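
A usage sketch: serve HTTP on the in-memory listener and dial it back through l.Dial (import path per this vendoring):

package main

import (
	"fmt"
	"net"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	l := sockets.NewInmemSocket("inmem", 8)
	defer l.Close()

	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	}))

	// Wire a client's transport to the in-memory Dial.
	client := &http.Client{Transport: &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			return l.Dial(network, addr)
		},
	}}

	resp, err := client.Get("http://inmem/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK
}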
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
new file mode 100644
index 000000000..98e9a1dc6
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/proxy.go
@@ -0,0 +1,51 @@
+package sockets
+
+import (
+ "net"
+ "net/url"
+ "os"
+ "strings"
+
+ "golang.org/x/net/proxy"
+)
+
+// GetProxyEnv allows access to the uppercase and the lowercase forms of
+// proxy-related variables. See the net/http documentation for details on
+// these variables: https://golang.org/pkg/net/http/
+func GetProxyEnv(key string) string {
+ proxyValue := os.Getenv(strings.ToUpper(key))
+ if proxyValue == "" {
+ return os.Getenv(strings.ToLower(key))
+ }
+ return proxyValue
+}
+
+// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
+// proxy.Dialer which will route the connections through the proxy using the
+// given dialer.
+func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
+ allProxy := GetProxyEnv("all_proxy")
+ if len(allProxy) == 0 {
+ return direct, nil
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return direct, err
+ }
+
+ proxyFromURL, err := proxy.FromURL(proxyURL, direct)
+ if err != nil {
+ return direct, err
+ }
+
+ noProxy := GetProxyEnv("no_proxy")
+ if len(noProxy) == 0 {
+ return proxyFromURL, nil
+ }
+
+ perHost := proxy.NewPerHost(proxyFromURL, direct)
+ perHost.AddFromString(noProxy)
+
+ return perHost, nil
+}
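
A sketch of the intended call pattern; with ALL_PROXY unset this simply returns the direct dialer, so the connection below is made directly:

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/docker/go-connections/sockets"
)

func main() {
	direct := &net.Dialer{Timeout: 10 * time.Second}

	// Honors all_proxy/ALL_PROXY and no_proxy/NO_PROXY when set.
	dialer, err := sockets.DialerFromEnvironment(direct)
	if err != nil {
		panic(err)
	}

	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}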
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
new file mode 100644
index 000000000..a1d7beb4d
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets.go
@@ -0,0 +1,38 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "errors"
+ "net"
+ "net/http"
+ "time"
+)
+
+// Why 32? See https://github.com/docker/docker/pull/8035.
+const defaultTimeout = 32 * time.Second
+
+// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
+var ErrProtocolNotAvailable = errors.New("protocol not available")
+
+// ConfigureTransport configures the specified Transport according to the
+// specified proto and addr.
+// If the proto is unix (using a unix socket to communicate) or npipe,
+// compression is disabled.
+func ConfigureTransport(tr *http.Transport, proto, addr string) error {
+ switch proto {
+ case "unix":
+ return configureUnixTransport(tr, proto, addr)
+ case "npipe":
+ return configureNpipeTransport(tr, proto, addr)
+ default:
+ tr.Proxy = http.ProxyFromEnvironment
+ dialer, err := DialerFromEnvironment(&net.Dialer{
+ Timeout: defaultTimeout,
+ })
+ if err != nil {
+ return err
+ }
+ tr.Dial = dialer.Dial
+ }
+ return nil
+}
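
A usage sketch wiring an http.Client to a unix socket; the socket path here is purely illustrative:

package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}

	// After this, every request on the transport dials the unix socket,
	// regardless of the host named in the request URL.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}

	client := &http.Client{Transport: tr}
	_ = client // e.g. client.Get("http://localhost/_ping")
}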
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
new file mode 100644
index 000000000..386cf0dbb
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
@@ -0,0 +1,35 @@
+// +build !windows
+
+package sockets
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "syscall"
+ "time"
+)
+
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ if len(addr) > maxUnixSocketPathSize {
+ return fmt.Errorf("Unix socket path %q is too long", addr)
+ }
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return net.DialTimeout(proto, addr, defaultTimeout)
+ }
+ return nil
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+// DialPipe connects to a Windows named pipe.
+// This is not supported on other OSes.
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
+ return nil, syscall.EAFNOSUPPORT
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
new file mode 100644
index 000000000..5c21644e1
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
@@ -0,0 +1,27 @@
+package sockets
+
+import (
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return DialPipe(addr, defaultTimeout)
+ }
+ return nil
+}
+
+// DialPipe connects to a Windows named pipe.
+func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
+ return winio.DialPipe(addr, &timeout)
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
new file mode 100644
index 000000000..53cbb6c79
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
@@ -0,0 +1,22 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+// NewTCPSocket creates a TCP socket listener with the specified address and
+// the specified TLS configuration. If tlsConfig is set, the TCP listener is
+// wrapped inside a TLS listener.
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ if tlsConfig != nil {
+ tlsConfig.NextProtos = []string{"http/1.1"}
+ l = tls.NewListener(l, tlsConfig)
+ }
+ return l, nil
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
new file mode 100644
index 000000000..a8b5dbb6f
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package sockets
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) {
+ if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ mask := syscall.Umask(0777)
+ defer syscall.Umask(mask)
+
+ l, err := net.Listen("unix", path)
+ if err != nil {
+ return nil, err
+ }
+ if err := os.Chown(path, 0, gid); err != nil {
+ l.Close()
+ return nil, err
+ }
+ if err := os.Chmod(path, 0660); err != nil {
+ l.Close()
+ return nil, err
+ }
+ return l, nil
+}
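
A short sketch; the path is illustrative, and gid 0 keeps the example self-contained (a real caller would pass the target group's ID, and the chown may require elevated privileges):

package main

import (
	"fmt"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// Unlinks any stale socket, then listens with mode 0660, group 0.
	l, err := sockets.NewUnixSocket("/tmp/example.sock", 0)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	fmt.Println("listening on", l.Addr())
}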
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 000000000..1ca0965e0
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+ "runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool. It returns an error
+// if the pool failed to load, or an empty pool on Windows.
+func SystemCertPool() (*x509.CertPool, error) {
+ certpool, err := x509.SystemCertPool()
+ if err != nil && runtime.GOOS == "windows" {
+ return x509.NewCertPool(), nil
+ }
+ return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 000000000..9ca974539
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+)
+
+// SystemCertPool returns a new, empty cert pool;
+// accessing the system cert pool requires go 1.7 or later.
+func SystemCertPool() (*x509.CertPool, error) {
+ return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 000000000..1b31bbb8b
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,244 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/pkg/errors"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+ CAFile string
+
+ // If either CertFile or KeyFile is empty, Client() will not load them,
+ // preventing the client from authenticating to the server.
+ // However, Server() requires them and will error out if they are empty.
+ CertFile string
+ KeyFile string
+
+ // client-only option
+ InsecureSkipVerify bool
+ // server-only option
+ ClientAuth tls.ClientAuthType
+ // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+ // creds will include exclusively the roots in that CA file. If no CA file is provided,
+ // the system pool will be used.
+ ExclusiveRootPools bool
+ MinVersion uint16
+ // If Passphrase is set, it will be used to decrypt a TLS private key
+ // if the key is encrypted
+ Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - will be phased out in the future
+var acceptedCBCCiphers = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+ tls.VersionSSL30: {},
+ tls.VersionTLS10: {},
+ tls.VersionTLS11: {},
+ tls.VersionTLS12: {},
+}
+
+// ServerDefault returns a secure-enough TLS configuration for server use.
+func ServerDefault() *tls.Config {
+ return &tls.Config{
+ // Avoid fallback to SSL protocols < TLS1.0
+ MinVersion: tls.VersionTLS10,
+ PreferServerCipherSuites: true,
+ CipherSuites: DefaultServerAcceptedCiphers,
+ }
+}
+
+// ClientDefault returns a secure-enough TLS configuration for client use.
+func ClientDefault() *tls.Config {
+ return &tls.Config{
+ // Prefer TLS1.2 as the client minimum
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: clientCipherSuites,
+ }
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+ // If we should verify the server, we need to load a trusted ca
+ var (
+ certPool *x509.CertPool
+ err error
+ )
+ if exclusivePool {
+ certPool = x509.NewCertPool()
+ } else {
+ certPool, err = SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read system certificates: %v", err)
+ }
+ }
+ pem, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+ }
+ if !certPool.AppendCertsFromPEM(pem) {
+ return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+ }
+ return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+ _, ok := allTLSVersions[version]
+ return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+ if options.MinVersion > 0 {
+ if !isValidMinVersion(options.MinVersion) {
+ return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+ }
+ if options.MinVersion < config.MinVersion {
+ return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+ }
+ config.MinVersion = options.MinVersion
+ }
+
+ return nil
+}
+
+// IsErrEncryptedKey returns true if 'err' is an incorrect-password error
+// from trying to decrypt a TLS private key
+func IsErrEncryptedKey(err error) bool {
+ return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+ // this section makes some small changes to code from notary/tuf/utils/x509.go
+ pemBlock, _ := pem.Decode(keyBytes)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("no valid private key found")
+ }
+
+ var err error
+ if x509.IsEncryptedPEMBlock(pemBlock) {
+ keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+ }
+ keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+ }
+
+ return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options',
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+ if options.CertFile == "" && options.KeyFile == "" {
+ return nil, nil
+ }
+
+ errMessage := "Could not load X509 key pair"
+
+ cert, err := ioutil.ReadFile(options.CertFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+ tlsConfig := ClientDefault()
+ tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+ if !options.InsecureSkipVerify && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = CAs
+ }
+
+ tlsCerts, err := getCert(options)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = tlsCerts
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+ tlsConfig := ServerDefault()
+ tlsConfig.ClientAuth = options.ClientAuth
+ tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+ }
+ return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.ClientCAs = CAs
+ }
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
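A sketch of wiring up Client and Server from the Options type above; the certificate paths are illustrative, and ClientAuth uses the standard crypto/tls constants:

    package main

    import (
        "crypto/tls"
        "log"

        "github.com/docker/go-connections/tlsconfig"
    )

    func main() {
        // Client side: verify the server against a private CA and present
        // a client certificate for mutual TLS.
        clientCfg, err := tlsconfig.Client(tlsconfig.Options{
            CAFile:   "/etc/example/ca.pem",
            CertFile: "/etc/example/client-cert.pem",
            KeyFile:  "/etc/example/client-key.pem",
        })
        if err != nil {
            log.Fatal(err)
        }

        // Server side: require and verify client certificates issued by
        // the same CA (ClientAuth >= VerifyClientCertIfGiven enables ClientCAs).
        serverCfg, err := tlsconfig.Server(tlsconfig.Options{
            CAFile:     "/etc/example/ca.pem",
            CertFile:   "/etc/example/server-cert.pem",
            KeyFile:    "/etc/example/server-key.pem",
            ClientAuth: tls.RequireAndVerifyClientCert,
        })
        if err != nil {
            log.Fatal(err)
        }
        _, _ = clientCfg, serverCfg // pass to tls.Dial / tls.NewListener
    }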
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
new file mode 100644
index 000000000..6b4c6a7c0
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
@@ -0,0 +1,17 @@
+// +build go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
new file mode 100644
index 000000000..ee22df47c
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
@@ -0,0 +1,15 @@
+// +build !go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
new file mode 100644
index 000000000..b55b37bc3
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 000000000..4f70a4e13
--- /dev/null
+++ b/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,16 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human-friendly measurements into machine-friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc.
+
+go-units is licensed under the Apache License, Version 2.0.
+See [LICENSE](LICENSE) for the full text of the license.
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 000000000..c219a8a96
--- /dev/null
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper functions to parse and print size and time
+// units in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
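Example outputs, derived directly from the thresholds above (the import path is the package's standard docker/go-units location):

    package main

    import (
        "fmt"
        "time"

        units "github.com/docker/go-units"
    )

    func main() {
        fmt.Println(units.HumanDuration(47 * time.Second)) // "47 seconds"
        fmt.Println(units.HumanDuration(90 * time.Minute)) // "About an hour"
        fmt.Println(units.HumanDuration(72 * time.Hour))   // "3 days"
    }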
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 000000000..f5b82ea24
--- /dev/null
+++ b/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,96 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using a custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ unitsLimit := len(_map) - 1
+ for size >= base && i < unitsLimit {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 significant digits (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44 KiB", "17 MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// parseSize parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 4 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[3])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= float64(mul)
+ }
+
+ return int64(size), nil
+}
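A quick round-trip using the helpers above; the expected values follow directly from the decimal and binary unit maps:

    package main

    import (
        "fmt"

        units "github.com/docker/go-units"
    )

    func main() {
        fmt.Println(units.HumanSize(1234567)) // "1.235 MB" (decimal, 4 significant digits)
        fmt.Println(units.BytesSize(1234567)) // "1.177 MiB" (binary)

        n, _ := units.FromHumanSize("32kb") // 32 * 1000
        fmt.Println(n)                      // 32000

        ram, _ := units.RAMInBytes("64m") // 64 * 1024 * 1024
        fmt.Println(ram)                  // 67108864
    }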
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 000000000..5ac7fd825
--- /dev/null
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // Magic numbers for making the syscall.
+ // Some of these are defined in the syscall package, but not all.
+ // Also, since the Windows client doesn't have access to the syscall
+ // package, they all need to be defined here.
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
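ParseUlimit accepts "name=soft[:hard]"; when no hard value is given, the hard limit defaults to the soft one. A small sketch (import path as above):

    package main

    import (
        "fmt"
        "log"

        units "github.com/docker/go-units"
    )

    func main() {
        u, err := units.ParseUlimit("nofile=1024:2048")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048

        r, err := u.GetRlimit()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(r.Type) // 7 (rlimitNofile)
    }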
diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE
new file mode 100644
index 000000000..27448585a
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 000000000..dcffb31ae
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,22 @@
+# libtrust
+
+> **WARNING** this library is no longer actively developed, and will be integrated
+> into the [docker/distribution](https://www.github.com/docker/distribution)
+> repository in the future.
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control are managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go
new file mode 100644
index 000000000..3dcca33cb
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "time"
+)
+
+type certTemplateInfo struct {
+ commonName string
+ domains []string
+ ipAddresses []net.IP
+ isCA bool
+ clientAuth bool
+ serverAuth bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+ // Generate a certificate template which is valid from the past week to
+ // 10 years from now. The usage of the certificate depends on the
+ // specified fields in the given certTempInfo object.
+ var (
+ keyUsage x509.KeyUsage
+ extKeyUsage []x509.ExtKeyUsage
+ )
+
+ if info.isCA {
+ keyUsage = x509.KeyUsageCertSign
+ }
+
+ if info.clientAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
+ }
+
+ if info.serverAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
+ }
+
+ return &x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{
+ CommonName: info.commonName,
+ },
+ NotBefore: time.Now().Add(-time.Hour * 24 * 7),
+ NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
+ DNSNames: info.domains,
+ IPAddresses: info.ipAddresses,
+ IsCA: info.isCA,
+ KeyUsage: keyUsage,
+ ExtKeyUsage: extKeyUsage,
+ BasicConstraintsValid: info.isCA,
+ }
+}
+
+func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
+ pubCertTemplate := generateCertTemplate(subInfo)
+ privCertTemplate := generateCertTemplate(issInfo)
+
+ certDER, err := x509.CreateCertificate(
+ rand.Reader, pubCertTemplate, privCertTemplate,
+ pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create certificate: %s", err)
+ }
+
+ cert, err = x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate: %s", err)
+ }
+
+ return
+}
+
+// GenerateSelfSignedServerCert creates a self-signed certificate for the
+// given key which is to be used for TLS servers with the given domains and
+// IP addresses.
+func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ domains: domains,
+ ipAddresses: ipAddresses,
+ serverAuth: true,
+ }
+
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateSelfSignedClientCert creates a self-signed certificate for the
+// given key which is to be used for TLS clients.
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ clientAuth: true,
+ }
+
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateCACert creates a certificate which can be used as a trusted
+// certificate authority.
+func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
+ subjectInfo := &certTemplateInfo{
+ commonName: trustedKey.KeyID(),
+ isCA: true,
+ }
+ issuerInfo := &certTemplateInfo{
+ commonName: signer.KeyID(),
+ }
+
+ return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
+}
+
+// GenerateCACertPool creates a certificate authority pool to be used for a
+// TLS configuration. Any self-signed certificates issued by the specified
+// trusted keys will be verified during a TLS handshake.
+func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
+ certPool := x509.NewCertPool()
+
+ for _, trustedKey := range trustedKeys {
+ cert, err := GenerateCACert(signer, trustedKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
+ }
+
+ certPool.AddCert(cert)
+ }
+
+ return certPool, nil
+}
+
+// LoadCertificateBundle loads certificates from the given file. The file should be PEM-encoded,
+// containing one or more certificates. The expected PEM type is "CERTIFICATE".
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ certificates := []*x509.Certificate{}
+ var block *pem.Block
+ block, b = pem.Decode(b)
+ for ; block != nil; block, b = pem.Decode(b) {
+ if block.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ certificates = append(certificates, cert)
+ } else {
+ return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
+ }
+ }
+
+ return certificates, nil
+}
+
+// LoadCertificatePool loads a CA pool from the given file. The file should be PEM-encoded,
+// containing one or more certificates. The expected PEM type is "CERTIFICATE".
+func LoadCertificatePool(filename string) (*x509.CertPool, error) {
+ certs, err := LoadCertificateBundle(filename)
+ if err != nil {
+ return nil, err
+ }
+ pool := x509.NewCertPool()
+ for _, cert := range certs {
+ pool.AddCert(cert)
+ }
+ return pool, nil
+}
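A sketch of issuing a throwaway server certificate with the helpers above (GenerateECP256PrivateKey is defined later in this vendored package's ec_key.go; the hostname and IP are illustrative):

    package main

    import (
        "log"
        "net"

        "github.com/docker/libtrust"
    )

    func main() {
        key, err := libtrust.GenerateECP256PrivateKey()
        if err != nil {
            log.Fatal(err)
        }
        // Self-signed certificate usable by a TLS server for localhost.
        cert, err := libtrust.GenerateSelfSignedServerCert(
            key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
        )
        if err != nil {
            log.Fatal(err)
        }
        log.Println("issued certificate for", cert.Subject.CommonName)
    }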
diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go
new file mode 100644
index 000000000..ec5d2159c
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/doc.go
@@ -0,0 +1,9 @@
+/*
+Package libtrust provides an interface for managing authentication and
+authorization using public key cryptography. Authentication is handled
+using the identity attached to the public key and verified through TLS
+x509 certificates, a key challenge, or signature. Authorization and
+access control is managed through a trust graph distributed between
+both remote trust servers and locally cached and managed data.
+*/
+package libtrust
diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go
new file mode 100644
index 000000000..00bbe4b3c
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/ec_key.go
@@ -0,0 +1,428 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * EC DSA PUBLIC KEY
+ */
+
+// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
+// signature algorithms.
+type ecPublicKey struct {
+ *ecdsa.PublicKey
+ curveName string
+ signatureAlgorithm *signatureAlgorithm
+ extended map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+ curve := cryptoPublicKey.Curve
+
+ switch {
+ case curve == elliptic.P256():
+ return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+ case curve == elliptic.P384():
+ return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+ case curve == elliptic.P521():
+ return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+ default:
+ return nil, errors.New("unsupported elliptic curve")
+ }
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+ return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+ return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+ return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // For EC keys there is only one supported signature algorithm depending
+ // on the curve parameters.
+ if k.signatureAlgorithm.HeaderParam() != alg {
+ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+ }
+
+ // signature is the concatenation of (r, s), base64Url encoded.
+ sigLength := len(signature)
+ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+ if sigLength != expectedOctetLength {
+ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+ }
+
+ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+ r := new(big.Int).SetBytes(rBytes)
+ s := new(big.Int).SetBytes(sBytes)
+
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err := io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+ return errors.New("invalid signature")
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["crv"] = k.CurveName()
+
+ xBytes := k.X.Bytes()
+ yBytes := k.Y.Bytes()
+ octetLength := (k.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output so that x, y are each
+ // *octetLength* bytes long.
+ xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+ yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+ xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...)
+
+ jwk["x"] = joseBase64UrlEncode(xBuf)
+ jwk["y"] = joseBase64UrlEncode(yBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+func (k *ecPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract 'crv', 'x', 'y', and 'kid' and check for
+ // consistency.
+
+ // Get the curve identifier value.
+ crv, err := stringFromMap(jwk, "crv")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
+ }
+
+ var (
+ curve elliptic.Curve
+ sigAlg *signatureAlgorithm
+ )
+
+ switch {
+ case crv == "P-256":
+ curve = elliptic.P256()
+ sigAlg = es256
+ case crv == "P-384":
+ curve = elliptic.P384()
+ sigAlg = es384
+ case crv == "P-521":
+ curve = elliptic.P521()
+ sigAlg = es512
+ default:
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
+ }
+
+ // Get the X and Y coordinates for the public key point.
+ xB64Url, err := stringFromMap(jwk, "x")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+ x, err := parseECCoordinate(xB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+
+ yB64Url, err := stringFromMap(jwk, "y")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+ y, err := parseECCoordinate(yB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+
+ key := &ecPublicKey{
+ PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
+ curveName: crv, signatureAlgorithm: sigAlg,
+ }
+
+ // Key ID is optional too, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+ }
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+ ecPublicKey
+ *ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ // The given hashId is only a suggestion, and since EC keys only support
+ // one signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'.
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P521())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+ }
+
+ k.curveName = "P-521"
+ k.signatureAlgorithm = es512
+
+ return k, nil
+}
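
Editor's note: a minimal usage sketch for the EC generators above (a hypothetical main package; error handling abbreviated):

    package main

    import (
        "encoding/pem"
        "fmt"
        "log"

        "github.com/docker/libtrust"
    )

    func main() {
        key, err := libtrust.GenerateECP256PrivateKey()
        if err != nil {
            log.Fatal(err)
        }

        // JWK JSON serialization of the private key.
        jwkJSON, err := key.MarshalJSON()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("JWK: %s\n", jwkJSON)

        // PEM serialization ("EC PRIVATE KEY") of the same key.
        block, err := key.PEMBlock()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s", pem.EncodeToMemory(block))
    }
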
diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 000000000..5b2b4fca6
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+ "path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+ filtered := make([]PublicKey, 0, len(keys))
+
+ for _, pubKey := range keys {
+ var hosts []string
+ switch v := pubKey.GetExtendedField("hosts").(type) {
+ case []string:
+ hosts = v
+ case []interface{}:
+ for _, value := range v {
+ h, ok := value.(string)
+ if !ok {
+ continue
+ }
+ hosts = append(hosts, h)
+ }
+ }
+
+ if len(hosts) == 0 {
+ if includeEmpty {
+ filtered = append(filtered, pubKey)
+ }
+ continue
+ }
+
+ // Check if any hosts match pattern
+ for _, hostPattern := range hosts {
+ match, err := filepath.Match(hostPattern, host)
+ if err != nil {
+ return nil, err
+ }
+
+ if match {
+ filtered = append(filtered, pubKey)
+ // Stop at the first matching pattern so the same key is
+ // not appended more than once.
+ break
+ }
+ }
+ }
+
+ return filtered, nil
+}
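
Editor's note: a sketch of how FilterByHosts is typically called, assuming knownHosts is a []libtrust.PublicKey loaded elsewhere (for example via LoadKeySetFile) whose entries carry a "hosts" extended field; the host name is illustrative:

    // Keep only keys trusted for registry.example.com:443. Keys with no
    // "hosts" field are dropped because includeEmpty is false.
    trusted, err := libtrust.FilterByHosts(knownHosts, "registry.example.com:443", false)
    if err != nil {
        log.Fatal(err)
    }
    for _, k := range trusted {
        fmt.Println(k.KeyID())
    }
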
diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go
new file mode 100644
index 000000000..a2df787dd
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+ "crypto"
+ _ "crypto/sha256" // Registrer SHA224 and SHA256
+ _ "crypto/sha512" // Registrer SHA384 and SHA512
+ "fmt"
+)
+
+type signatureAlgorithm struct {
+ algHeaderParam string
+ hashID crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+ return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+ return h.hashID
+}
+
+var (
+ rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+ rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+ rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+ es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+ es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+ es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+ switch {
+ case alg == "RS256":
+ return rs256, nil
+ case alg == "RS384":
+ return rs384, nil
+ case alg == "RS512":
+ return rs512, nil
+ default:
+ return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+ }
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+ switch {
+ case hashID == crypto.SHA512:
+ return rs512
+ case hashID == crypto.SHA384:
+ return rs384
+ case hashID == crypto.SHA256:
+ fallthrough
+ default:
+ return rs256
+ }
+}
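
Editor's note: these lookup helpers are unexported, so they are only callable from inside the package; a sketch of the mapping they implement:

    // Within package libtrust:
    sigAlg, err := rsaSignatureAlgorithmByName("RS384")
    if err != nil {
        log.Fatal(err) // only reached for unsupported names
    }
    fmt.Println(sigAlg.HeaderParam())             // "RS384"
    fmt.Println(sigAlg.HashID() == crypto.SHA384) // true

    // The reverse direction, choosing an algorithm for a requested hash.
    // Unsupported hashes fall back to RS256.
    fmt.Println(rsaPKCS1v15SignatureAlgorithmForHashID(crypto.SHA512).HeaderParam()) // "RS512"
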
diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 000000000..cb2ca9a76
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "time"
+ "unicode"
+)
+
+var (
+ // ErrInvalidSignContent is used when the content to be signed is invalid.
+ ErrInvalidSignContent = errors.New("invalid sign content")
+
+ // ErrInvalidJSONContent is used when invalid json is encountered.
+ ErrInvalidJSONContent = errors.New("invalid json content")
+
+ // ErrMissingSignatureKey is used when the specified signature key
+ // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key")
+)
+
+type jsHeader struct {
+ JWK PublicKey `json:"jwk,omitempty"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c,omitempty"`
+}
+
+type jsSignature struct {
+ Header jsHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected,omitempty"`
+}
+
+type jsSignaturesSorted []jsSignature
+
+func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
+func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) }
+
+func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
+ ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
+ si, sj := jsbkid[i].Signature, jsbkid[j].Signature
+
+ if ki == kj {
+ return si < sj
+ }
+
+ return ki < kj
+}
+
+type signKey struct {
+ PrivateKey
+ Chain []*x509.Certificate
+}
+
+// JSONSignature represents a signature of a json object.
+type JSONSignature struct {
+ payload string
+ signatures []jsSignature
+ indent string
+ formatLength int
+ formatTail []byte
+}
+
+func newJSONSignature() *JSONSignature {
+ return &JSONSignature{
+ signatures: make([]jsSignature, 0, 1),
+ }
+}
+
+// Payload returns the decoded payload of the signature. This payload
+// should not be signed directly, since the JWS signing input also
+// covers the protected header.
+func (js *JSONSignature) Payload() ([]byte, error) {
+ return joseBase64UrlDecode(js.payload)
+}
+
+func (js *JSONSignature) protectedHeader() (string, error) {
+ protected := map[string]interface{}{
+ "formatLength": js.formatLength,
+ "formatTail": joseBase64UrlEncode(js.formatTail),
+ "time": time.Now().UTC().Format(time.RFC3339),
+ }
+ protectedBytes, err := json.Marshal(protected)
+ if err != nil {
+ return "", err
+ }
+
+ return joseBase64UrlEncode(protectedBytes), nil
+}
+
+func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
+ buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
+ copy(buf, protectedHeader)
+ buf[len(protectedHeader)] = '.'
+ copy(buf[len(protectedHeader)+1:], js.payload)
+ return buf, nil
+}
+
+// Sign adds a signature using the given private key.
+func (js *JSONSignature) Sign(key PrivateKey) error {
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ js.signatures = append(js.signatures, jsSignature{
+ Header: jsHeader{
+ JWK: key.PublicKey(),
+ Algorithm: algorithm,
+ },
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ })
+
+ return nil
+}
+
+// SignWithChain adds a signature using the given private key
+// and sets the x509 chain. The public key of the first element
+// in the chain must correspond to the signing key.
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
+ // Ensure key.Chain[0] is public key for key
+ //key.Chain.PublicKey
+ //key.PublicKey().CryptoPublicKey()
+
+ // Verify chain
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ header := jsHeader{
+ Chain: make([]string, len(chain)),
+ Algorithm: algorithm,
+ }
+
+ for i, cert := range chain {
+ header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
+ }
+
+ js.signatures = append(js.signatures, jsSignature{
+ Header: header,
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ })
+
+ return nil
+}
+
+// Verify verifies all the signatures and returns the list of
+// public keys used to sign. Any x509 chains are not checked.
+func (js *JSONSignature) Verify() ([]PublicKey, error) {
+ keys := make([]PublicKey, len(js.signatures))
+ for i, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ } else if signature.Header.JWK != nil {
+ publicKey = signature.Header.JWK
+ } else {
+ return nil, errors.New("missing public key")
+ }
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ keys[i] = publicKey
+ }
+ return keys, nil
+}
+
+// VerifyChains verifies all the signatures and the chains associated
+// with each signature and returns the list of verified chains.
+// Signatures without an x509 chain are not checked.
+func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
+ chains := make([][]*x509.Certificate, 0, len(js.signatures))
+ for _, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ intermediates := x509.NewCertPool()
+ if len(signature.Header.Chain) > 1 {
+ intermediateChain := signature.Header.Chain[1:]
+ for i := range intermediateChain {
+ certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
+ if err != nil {
+ return nil, err
+ }
+ intermediate, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ intermediates.AddCert(intermediate)
+ }
+ }
+
+ verifyOptions := x509.VerifyOptions{
+ Intermediates: intermediates,
+ Roots: ca,
+ }
+
+ verifiedChains, err := cert.Verify(verifyOptions)
+ if err != nil {
+ return nil, err
+ }
+ chains = append(chains, verifiedChains...)
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ }
+ return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("missing signature")
+ }
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ jsonMap := map[string]interface{}{
+ "payload": js.payload,
+ "signatures": js.signatures,
+ }
+
+ return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+ return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+ if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+ quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+ if quoteIndex > 0 {
+ indent = string(jsonContent[2 : quoteIndex+1])
+ }
+ }
+ return
+}
+
+type jsParsedHeader struct {
+ JWK json.RawMessage `json:"jwk"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+ Header jsParsedHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+ type jsParsed struct {
+ Payload string `json:"payload"`
+ Signatures []jsParsedSignature `json:"signatures"`
+ }
+ parsed := &jsParsed{}
+ err := json.Unmarshal(content, parsed)
+ if err != nil {
+ return nil, err
+ }
+ if len(parsed.Signatures) == 0 {
+ return nil, errors.New("missing signatures")
+ }
+ payload, err := joseBase64UrlDecode(parsed.Payload)
+ if err != nil {
+ return nil, err
+ }
+
+ js, err := NewJSONSignature(payload)
+ if err != nil {
+ return nil, err
+ }
+ js.signatures = make([]jsSignature, len(parsed.Signatures))
+ for i, signature := range parsed.Signatures {
+ header := jsHeader{
+ Algorithm: signature.Header.Algorithm,
+ }
+ if signature.Header.Chain != nil {
+ header.Chain = signature.Header.Chain
+ }
+ if signature.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = jsSignature{
+ Header: header,
+ Signature: signature.Signature,
+ Protected: signature.Protected,
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a json byte array.
+// JSONSignature will need to be signed before serializing or storing.
+// Optionally, one or more signatures can be provided as byte buffers,
+// containing serialized JWS signatures, to assemble a fully signed JWS
+// package. It is the caller's responsibility to ensure uniqueness of the
+// provided signatures.
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
+ var dataMap map[string]interface{}
+ err := json.Unmarshal(content, &dataMap)
+ if err != nil {
+ return nil, err
+ }
+
+ js := newJSONSignature()
+ js.indent = detectJSONIndent(content)
+
+ js.payload = joseBase64UrlEncode(content)
+
+ // Find trailing } and whitespace, put in protected header
+ closeIndex := bytes.LastIndexFunc(content, notSpace)
+ if content[closeIndex] != '}' {
+ return nil, ErrInvalidJSONContent
+ }
+ lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+ if content[lastRuneIndex] == ',' {
+ return nil, ErrInvalidJSONContent
+ }
+ js.formatLength = lastRuneIndex + 1
+ js.formatTail = content[js.formatLength:]
+
+ if len(signatures) > 0 {
+ for _, signature := range signatures {
+ var parsedJSig jsParsedSignature
+
+ if err := json.Unmarshal(signature, &parsedJSig); err != nil {
+ return nil, err
+ }
+
+ // TODO(stevvooe): A lot of the code below is repeated in
+ // ParseJWS. It will require more refactoring to fix that.
+ jsig := jsSignature{
+ Header: jsHeader{
+ Algorithm: parsedJSig.Header.Algorithm,
+ },
+ Signature: parsedJSig.Signature,
+ Protected: parsedJSig.Protected,
+ }
+
+ if parsedJSig.Header.Chain != nil {
+ jsig.Header.Chain = parsedJSig.Header.Chain
+ }
+
+ if parsedJSig.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ jsig.Header.JWK = publicKey
+ }
+
+ js.signatures = append(js.signatures, jsig)
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// struct. JWS will need to be signed before serializing or storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
+ switch content.(type) {
+ case map[string]interface{}:
+ case struct{}:
+ default:
+ return nil, errors.New("invalid data type")
+ }
+
+ js := newJSONSignature()
+ js.indent = " "
+
+ payload, err := json.MarshalIndent(content, "", js.indent)
+ if err != nil {
+ return nil, err
+ }
+ js.payload = joseBase64UrlEncode(payload)
+
+ // Remove '\n}' from formatted section, put in protected header
+ js.formatLength = len(payload) - 2
+ js.formatTail = payload[js.formatLength:]
+
+ return js, nil
+}
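
Editor's note: to make the format bookkeeping concrete, take a payload consisting of `{`, a newline, `  "a": 1`, a newline and `}` (12 bytes in total): formatLength is 10 and formatTail is the two-byte suffix of a newline followed by `}`. PrettySignature below reuses exactly these values to splice a signatures member in front of the stored tail.
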
+
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+ value, ok := m[key]
+ if !ok {
+ return 0, false
+ }
+ switch v := value.(type) {
+ case int:
+ return v, true
+ case float64:
+ return int(v), true
+ default:
+ return 0, false
+ }
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+ value, ok := m[key]
+ if !ok {
+ return "", false
+ }
+ v, ok = value.(string)
+ return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSONSignature. If the signatures are missing the format information
+// an error is returned. The formatted signature must have been created
+// by the corresponding PrettySignature method.
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
+ var contentMap map[string]json.RawMessage
+ err := json.Unmarshal(content, &contentMap)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling content: %s", err)
+ }
+ sigMessage, ok := contentMap[signatureKey]
+ if !ok {
+ return nil, ErrMissingSignatureKey
+ }
+
+ var signatureBlocks []jsParsedSignature
+ err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
+ }
+
+ js := newJSONSignature()
+ js.signatures = make([]jsSignature, len(signatureBlocks))
+
+ for i, signatureBlock := range signatureBlocks {
+ protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error: %s", err)
+ }
+ var protectedHeader map[string]interface{}
+ err = json.Unmarshal(protectedBytes, &protectedHeader)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
+ }
+
+ formatLength, ok := readIntFromMap("formatLength", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted length")
+ }
+ encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted tail")
+ }
+ formatTail, err := joseBase64UrlDecode(encodedTail)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error on tail: %s", err)
+ }
+ if js.formatLength == 0 {
+ js.formatLength = formatLength
+ } else if js.formatLength != formatLength {
+ return nil, errors.New("conflicting format length")
+ }
+ if len(js.formatTail) == 0 {
+ js.formatTail = formatTail
+ } else if !bytes.Equal(js.formatTail, formatTail) {
+ return nil, errors.New("conflicting format tail")
+ }
+
+ header := jsHeader{
+ Algorithm: signatureBlock.Header.Algorithm,
+ Chain: signatureBlock.Header.Chain,
+ }
+ if signatureBlock.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling public key: %s", err)
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = jsSignature{
+ Header: header,
+ Signature: signatureBlock.Signature,
+ Protected: signatureBlock.Protected,
+ }
+ }
+ if js.formatLength > len(content) {
+ return nil, errors.New("invalid format length")
+ }
+ formatted := make([]byte, js.formatLength+len(js.formatTail))
+ copy(formatted, content[:js.formatLength])
+ copy(formatted[js.formatLength:], js.formatTail)
+ js.indent = detectJSONIndent(formatted)
+ js.payload = joseBase64UrlEncode(formatted)
+
+ return js, nil
+}
+
+// PrettySignature formats a json signature into an easy to read
+// single json serialized object.
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("no signatures")
+ }
+ payload, err := joseBase64UrlDecode(js.payload)
+ if err != nil {
+ return nil, err
+ }
+ payload = payload[:js.formatLength]
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var marshalled []byte
+ var marshallErr error
+ if js.indent != "" {
+ marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+ } else {
+ marshalled, marshallErr = json.Marshal(js.signatures)
+ }
+ if marshallErr != nil {
+ return nil, marshallErr
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+ buf.Write(payload)
+ buf.WriteByte(',')
+ if js.indent != "" {
+ buf.WriteByte('\n')
+ buf.WriteString(js.indent)
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\": ")
+ buf.Write(marshalled)
+ buf.WriteByte('\n')
+ } else {
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\":")
+ buf.Write(marshalled)
+ }
+ buf.WriteByte('}')
+
+ return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var sb [][]byte
+ for _, jsig := range js.signatures {
+ p, err := json.Marshal(jsig)
+ if err != nil {
+ return nil, err
+ }
+
+ sb = append(sb, p)
+ }
+
+ return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+ merged := js.signatures
+ for _, other := range others {
+ if js.payload != other.payload {
+ return fmt.Errorf("payloads differ from merge target")
+ }
+ merged = append(merged, other.signatures...)
+ }
+
+ js.signatures = merged
+ return nil
+}
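
Editor's note: putting this file's pieces together, a minimal end-to-end sketch (a hypothetical main package; the key generator is defined elsewhere in this library):

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/libtrust"
    )

    func main() {
        content := []byte("{\n   \"name\": \"example\",\n   \"tag\": \"latest\"\n}")

        js, err := libtrust.NewJSONSignature(content)
        if err != nil {
            log.Fatal(err)
        }

        key, err := libtrust.GenerateECP256PrivateKey()
        if err != nil {
            log.Fatal(err)
        }
        if err := js.Sign(key); err != nil {
            log.Fatal(err)
        }

        // Embed the signatures under a "signatures" member next to the payload.
        pretty, err := js.PrettySignature("signatures")
        if err != nil {
            log.Fatal(err)
        }

        // Later: parse the formatted document and verify, recovering the keys.
        parsed, err := libtrust.ParsePrettySignature(pretty, "signatures")
        if err != nil {
            log.Fatal(err)
        }
        keys, err := parsed.Verify()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("signed by:", keys[0].KeyID())
    }
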
diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 000000000..73642db2a
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+ // KeyType returns the key type for this key. For elliptic curve keys,
+ // this value should be "EC". For RSA keys, this value should be "RSA".
+ KeyType() string
+ // KeyID returns a distinct identifier which is unique to this Public Key.
+ // The format generated by this library is a base32 encoding of a 240 bit
+ // hash of the public key data divided into 12 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ KeyID() string
+ // Verify verifies the signature of the data in the io.Reader using this
+ // Public Key. The alg parameter should identify the digital signature
+ // algorithm which was used to produce the signature and should be
+ // supported by this public key. Returns a nil error if the signature
+ // is valid.
+ Verify(data io.Reader, alg string, signature []byte) error
+ // CryptoPublicKey returns the internal object which can be used as a
+ // crypto.PublicKey for use with other standard library operations. The type
+ // is either *rsa.PublicKey or *ecdsa.PublicKey
+ CryptoPublicKey() crypto.PublicKey
+ // These public keys can be serialized to the standard JSON encoding for
+ // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+ // Algorithms.
+ MarshalJSON() ([]byte, error)
+ // These keys can also be serialized to the standard PEM encoding.
+ PEMBlock() (*pem.Block, error)
+ // The string representation of a key is its key type and ID.
+ String() string
+ AddExtendedField(string, interface{})
+ GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+ // A PrivateKey contains all fields and methods of a PublicKey of the
+ // same type. The MarshalJSON method also outputs the private key as a
+ // JSON Web Key, and the PEMBlock method outputs the private key as a
+ // PEM block.
+ PublicKey
+ // PublicKey returns the PublicKey associated with this PrivateKey.
+ PublicKey() PublicKey
+ // Sign signs the data read from the io.Reader using a signature algorithm
+ // supported by the private key. If the specified hashing algorithm is
+ // supported by this key, that hash function is used to generate the
+ // signature, otherwise the default hashing algorithm for this key is
+ // used. Returns the signature and identifier of the algorithm used.
+ Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+ // CryptoPrivateKey returns the internal object which can be used as a
+ // crypto.PrivateKey for use with other standard library operations. The
+ // type is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+ CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+ switch cryptoPublicKey := cryptoPublicKey.(type) {
+ case *ecdsa.PublicKey:
+ return fromECPublicKey(cryptoPublicKey)
+ case *rsa.PublicKey:
+ return fromRSAPublicKey(cryptoPublicKey), nil
+ default:
+ return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+ }
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+ switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+ case *ecdsa.PrivateKey:
+ return fromECPrivateKey(cryptoPrivateKey)
+ case *rsa.PrivateKey:
+ return fromRSAPrivateKey(cryptoPrivateKey), nil
+ default:
+ return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+ }
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ return pubKeyFromPEMBlock(pemBlock)
+}
+
+// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
+// PEM blocks appended one after the other and returns a slice of PublicKey
+// objects that it finds.
+func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
+ pubKeys := []PublicKey{}
+
+ for {
+ var pemBlock *pem.Block
+ pemBlock, data = pem.Decode(data)
+ if pemBlock == nil {
+ break
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ pubKey, err := pubKeyFromPEMBlock(pemBlock)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
+// PrivateKey or an error if there is a problem with the encoding.
+func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ }
+
+ var key PrivateKey
+
+ switch {
+ case pemBlock.Type == "RSA PRIVATE KEY":
+ rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
+ }
+ key = fromRSAPrivateKey(rsaPrivateKey)
+ case pemBlock.Type == "EC PRIVATE KEY":
+ ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
+ }
+ key, err = fromECPrivateKey(ecPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
+ }
+
+ addPEMHeadersToKey(pemBlock, key.PublicKey())
+
+ return key, nil
+}
+
+// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
+// Public Key to be used with libtrust.
+func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Public Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Public Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC public key.
+ return ecPublicKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA public key.
+ return rsaPublicKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Public Key type not supported: %q\n", kty,
+ )
+ }
+}
+
+// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
+// and returns a slice of Public Key objects.
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
+ rawKeys, err := loadJSONKeySetRaw(data)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys := make([]PublicKey, 0, len(rawKeys))
+
+ for _, rawKey := range rawKeys {
+ pubKey, err := UnmarshalPublicKeyJWK(rawKey)
+ if err != nil {
+ return nil, err
+ }
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
+// Private Key to be used with libtrust.
+func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Private Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Private Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC private key.
+ return ecPrivateKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA private key.
+ return rsaPrivateKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Private Key type not supported: %q\n", kty,
+ )
+ }
+}
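
Editor's note: a round-trip sketch of the unmarshalling entry points above (error handling abbreviated; assumes the imports used earlier in this file plus encoding/pem):

    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    // JWK round trip for the public half.
    jwkJSON, _ := key.PublicKey().MarshalJSON()
    pub, err := libtrust.UnmarshalPublicKeyJWK(jwkJSON)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(pub.KeyID() == key.KeyID()) // true

    // PEM round trip for the private key.
    block, _ := key.PEMBlock()
    priv, err := libtrust.UnmarshalPrivateKeyPEM(pem.EncodeToMemory(block))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(priv.KeyID() == key.KeyID()) // true
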
diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go
new file mode 100644
index 000000000..c526de545
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_files.go
@@ -0,0 +1,255 @@
+package libtrust
+
+import (
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+var (
+ // ErrKeyFileDoesNotExist indicates that the private key file does not exist.
+ ErrKeyFileDoesNotExist = errors.New("key file does not exist")
+)
+
+func readKeyFileBytes(filename string) ([]byte, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = ErrKeyFileDoesNotExist
+ } else {
+ err = fmt.Errorf("unable to read key file %s: %s", filename, err)
+ }
+
+ return nil, err
+ }
+
+ return data, nil
+}
+
+/*
+ Loading and Saving of Public and Private Keys in either PEM or JWK format.
+*/
+
+// LoadKeyFile opens the given filename and attempts to read a Private Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadKeyFile(filename string) (PrivateKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PrivateKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPrivateKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPrivateKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadPublicKeyFile(filename string) (PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PublicKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPublicKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPublicKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// SaveKey saves the given key to a file using the provided filename.
+// This process will overwrite any existing file at the provided location.
+func SaveKey(filename string, key PrivateKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode private key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode private key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
+ if err != nil {
+ return fmt.Errorf("unable to write private key file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+// SavePublicKey saves the given public key to the file.
+func SavePublicKey(filename string, key PublicKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode public key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode public key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write public key file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+// Public Key Set files
+
+type jwkSet struct {
+ Keys []json.RawMessage `json:"keys"`
+}
+
+// LoadKeySetFile loads a key set
+func LoadKeySetFile(filename string) ([]PublicKey, error) {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return loadJSONKeySetFile(filename)
+ }
+
+ // Must be a PEM format file
+ return loadPEMKeySetFile(filename)
+}
+
+func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
+ if len(data) == 0 {
+ // This is okay, just return an empty slice.
+ return []json.RawMessage{}, nil
+ }
+
+ keySet := jwkSet{}
+
+ err := json.Unmarshal(data, &keySet)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+ }
+
+ return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+ data, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return addKeySetJSONFile(filename, key)
+ }
+
+ // Must be a PEM format file
+ return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+ encodedKey, err := json.Marshal(key)
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client key: %s", err)
+ }
+
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return err
+ }
+
+ rawEntries, err := loadJSONKeySetRaw(contents)
+ if err != nil {
+ return err
+ }
+
+ rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+ entriesWrapper := jwkSet{Keys: rawEntries}
+
+ encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client keys: %s", err)
+ }
+
+ err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+ // Encode to PEM, open file for appending, write PEM.
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+ }
+ defer file.Close()
+
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encoded trusted key: %s", err)
+ }
+
+ _, err = file.Write(pem.EncodeToMemory(pemBlock))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted keys file: %s", err)
+ }
+
+ return nil
+}
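
Editor's note: the loaders and savers above select JWK or PEM purely by file extension; a brief sketch (file names are illustrative):

    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    // ".json"/".jwk" selects JWK encoding; anything else is written as PEM.
    if err := libtrust.SaveKey("trust_key.json", key); err != nil { // JWK, mode 0600
        log.Fatal(err)
    }

    loaded, err := libtrust.LoadKeyFile("trust_key.json")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(loaded.KeyID() == key.KeyID()) // true

    // Append the public half to a PEM bundle of trusted keys.
    if err := libtrust.AddKeySetFile("trusted_keys.pem", key.PublicKey()); err != nil {
        log.Fatal(err)
    }
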
diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 000000000..9a98ae357
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+ key PrivateKey
+ clientFile string
+ clientDir string
+
+ clientLock sync.RWMutex
+ clients []PublicKey
+
+ configLock sync.Mutex
+ configs []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files
+// and managed by the given private key.
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
+ m := &ClientKeyManager{
+ key: trustKey,
+ clientFile: clientFile,
+ clientDir: clientDir,
+ }
+ if err := m.loadKeys(); err != nil {
+ return nil, err
+ }
+ // TODO Start watching file and directory
+
+ return m, nil
+}
+
+func (c *ClientKeyManager) loadKeys() (err error) {
+ // Load authorized keys file
+ var clients []PublicKey
+ if c.clientFile != "" {
+ clients, err = LoadKeySetFile(c.clientFile)
+ if err != nil {
+ return fmt.Errorf("unable to load authorized keys: %s", err)
+ }
+ }
+
+ // Add clients from authorized keys directory
+ files, err := ioutil.ReadDir(c.clientDir)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("unable to open authorized keys directory: %s", err)
+ }
+ for _, f := range files {
+ if !f.IsDir() {
+ publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
+ if err != nil {
+ return fmt.Errorf("unable to load authorized key file: %s", err)
+ }
+ clients = append(clients, publicKey)
+ }
+ }
+
+ c.clientLock.Lock()
+ c.clients = clients
+ c.clientLock.Unlock()
+
+ return nil
+}
+
+// RegisterTLSConfig registers a tls configuration with the manager
+// so that any changes to the keys are reflected in
+// the tls client CA pool
+func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
+ c.clientLock.RLock()
+ certPool, err := GenerateCACertPool(c.key, c.clients)
+ if err != nil {
+ return fmt.Errorf("CA pool generation error: %s", err)
+ }
+ c.clientLock.RUnlock()
+
+ tlsConfig.ClientCAs = certPool
+
+ c.configLock.Lock()
+ c.configs = append(c.configs, tlsConfig)
+ c.configLock.Unlock()
+
+ return nil
+}
+
+// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
+// libtrust identity authentication for the domain specified
+func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
+ return nil, err
+ }
+
+ // Generate cert
+ ips, domains, err := parseAddr(addr)
+ if err != nil {
+ return nil, err
+ }
+ // add domain that it expects clients to use
+ domains = append(domains, domain)
+ x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
+ return tlsConfig, nil
+}
+
+// NewCertAuthTLSConfig creates a tls.Config for the server to use for
+// certificate authentication
+func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ cert, err := tls.LoadX509KeyPair(certPath, keyPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+
+ // Verify client certificates against a CA?
+ if caPath != "" {
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+ }
+ certPool.AppendCertsFromPEM(file)
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ tlsConfig.ClientCAs = certPool
+ }
+
+ return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+ return &tls.Config{
+ NextProtos: []string{"http/1.1"},
+ // Avoid fallback on insecure SSL protocols
+ MinVersion: tls.VersionTLS10,
+ }
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ var domains []string
+ var ips []net.IP
+ ip := net.ParseIP(host)
+ if ip != nil {
+ ips = []net.IP{ip}
+ } else {
+ domains = []string{host}
+ }
+ return ips, domains, nil
+}
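
Editor's note: a sketch of wiring the manager into an identity-authenticated TLS server (a hypothetical main package; paths and addresses are illustrative; GenerateSelfSignedServerCert, used internally, is defined elsewhere in this library):

    package main

    import (
        "log"
        "net/http"

        "github.com/docker/libtrust"
    )

    func main() {
        trustKey, err := libtrust.LoadKeyFile("/etc/example/trust_key.json")
        if err != nil {
            log.Fatal(err)
        }

        clients, err := libtrust.NewClientKeyManager(trustKey,
            "/etc/example/authorized_keys.json", "/etc/example/authorized_keys.d")
        if err != nil {
            log.Fatal(err)
        }

        tlsConfig, err := libtrust.NewIdentityAuthTLSConfig(trustKey, clients,
            "0.0.0.0:8443", "example.com")
        if err != nil {
            log.Fatal(err)
        }

        server := &http.Server{Addr: ":8443", TLSConfig: tlsConfig}
        // Certificates come from tlsConfig, so the file arguments stay empty.
        log.Fatal(server.ListenAndServeTLS("", ""))
    }
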
diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 000000000..dac4cacf2
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+ *rsa.PublicKey
+ extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+ return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+ return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+ return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // Verify the signature of the given data; returns a nil error if the signature is valid.
+ sigAlg, err := rsaSignatureAlgorithmByName(alg)
+ if err != nil {
+ return fmt.Errorf("unable to verify Signature: %s", err)
+ }
+
+ hasher := sigAlg.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+ if err != nil {
+ return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+func (k *rsaPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
+ jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract 'n', 'e', and 'kid' and check for
+ // consistency.
+
+ // Get the modulus parameter N.
+ nB64Url, err := stringFromMap(jwk, "n")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+ }
+
+ n, err := parseRSAModulusParam(nB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+ }
+
+ // Get the public exponent E.
+ eB64Url, err := stringFromMap(jwk, "e")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+ }
+
+ e, err := parseRSAPublicExponentParam(eB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+ }
+
+ key := &rsaPublicKey{
+ PublicKey: &rsa.PublicKey{N: n, E: e},
+ }
+
+ // Key ID is optional, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
+ }
+ }
+
+ if _, ok := jwk["d"]; ok {
+ return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * RSA DSA PRIVATE KEY
+ */
+
+// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
+type rsaPrivateKey struct {
+ rsaPublicKey
+ *rsa.PrivateKey
+}
+
+func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
+ return &rsaPrivateKey{
+ *fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
+ cryptoPrivateKey,
+ }
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *rsaPrivateKey) PublicKey() PublicKey {
+ return &k.rsaPublicKey
+}
+
+func (k *rsaPrivateKey) String() string {
+ return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the RSA private key. If the specified hashing algorithm is supported by
+ // this key, that hash function is used to generate the signature, otherwise
+ // the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+ hasher := sigAlg.HashID().New()
+
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+
+ alg = sigAlg.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+ k.Precompute() // Make sure the precomputed values are stored.
+ jwk := k.rsaPublicKey.toMap()
+
+ jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+ jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+ jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+ jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+ jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+ jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+ otherPrimes := k.Primes[2:]
+
+ if len(otherPrimes) > 0 {
+ otherPrimesInfo := make([]interface{}, len(otherPrimes))
+ for i, r := range otherPrimes {
+ otherPrimeInfo := make(map[string]string, 3)
+ otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+ crtVal := k.Precomputed.CRTValues[i]
+ otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+ otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+ otherPrimesInfo[i] = otherPrimeInfo
+ }
+ jwk["oth"] = otherPrimesInfo
+ }
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+ // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+ // only the private key exponent 'd' is REQUIRED, the others are just for
+ // signature/decryption optimizations and SHOULD be included when the JWK
+ // is produced. We MAY choose to accept a JWK which only includes 'd', but
+ // we choose not to accept it without the extra
+ // fields. Only the 'oth' field will be optional (for multi-prime keys).
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+ }
+ firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ var oth interface{}
+ if _, ok := jwk["oth"]; ok {
+ oth = jwk["oth"]
+ delete(jwk, "oth")
+ }
+
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract the public key information, then extract the private
+ // key values.
+ publicKey, err := rsaPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ privateKey := &rsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: privateExponent,
+ Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
+ Precomputed: rsa.PrecomputedValues{
+ Dp: firstFactorCRT,
+ Dq: secondFactorCRT,
+ Qinv: crtCoeff,
+ },
+ }
+
+ if oth != nil {
+ // Should be an array of more JSON objects.
+ otherPrimesInfo, ok := oth.([]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+ }
+ numOtherPrimeFactors := len(otherPrimesInfo)
+ if numOtherPrimeFactors == 0 {
+ return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
+ }
+ otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+ productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+ crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+ for i, val := range otherPrimesInfo {
+ otherPrimeinfo, ok := val.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+ }
+
+ otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ crtValue := crtValues[i]
+ crtValue.Exp = otherFactorCRT
+ crtValue.Coeff = otherCrtCoeff
+ crtValue.R = productOfPrimes
+ otherPrimeFactors[i] = otherPrimeFactor
+ productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+ }
+
+ privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+ privateKey.Precomputed.CRTValues = crtValues
+ }
+
+ key := &rsaPrivateKey{
+ rsaPublicKey: *publicKey,
+ PrivateKey: privateKey,
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
+ k = new(rsaPrivateKey)
+ k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+
+ k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
+func GenerateRSA2048PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(2048)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
+func GenerateRSA3072PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(3072)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
+func GenerateRSA4096PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(4096)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
+ }
+
+ return k, nil
+}
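
Editor's note: a short RSA sign/verify round trip with the generators above (error handling abbreviated; assumes bytes, crypto, fmt and log are imported):

    key, err := libtrust.GenerateRSA2048PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    payload := []byte("message to sign")
    sig, alg, err := key.Sign(bytes.NewReader(payload), crypto.SHA256)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(alg) // "RS256"

    // Verification needs only the public half.
    if err := key.PublicKey().Verify(bytes.NewReader(payload), alg, sig); err != nil {
        log.Fatal(err)
    }
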
diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go
new file mode 100644
index 000000000..a5a101d3f
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/util.go
@@ -0,0 +1,363 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/elliptic"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// LoadOrCreateTrustKey will load a PrivateKey from the specified path,
+// generating and saving a new key there if the file does not exist.
+func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
+ if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+ return nil, err
+ }
+
+ trustKey, err := LoadKeyFile(trustKeyPath)
+ if err == ErrKeyFileDoesNotExist {
+ trustKey, err = GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("error generating key: %s", err)
+ }
+
+ if err := SaveKey(trustKeyPath, trustKey); err != nil {
+ return nil, fmt.Errorf("error saving key file: %s", err)
+ }
+
+ dir, file := filepath.Split(trustKeyPath)
+ if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
+ return nil, fmt.Errorf("error saving public key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("error loading key file: %s", err)
+ }
+ return trustKey, nil
+}
+
+// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use
+// identity-based authentication from the specified dockerUrl, the rootConfigPath and
+// the server name to which it is connecting.
+// If trustUnknownHosts is true it will automatically add the host to the
+// known-hosts.json in rootConfigPath.
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ trustKeyPath := filepath.Join(rootConfigPath, "key.json")
+ knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
+
+ u, err := url.Parse(dockerUrl)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse machine url")
+ }
+
+ if u.Scheme == "unix" {
+ return nil, nil
+ }
+
+ addr := u.Host
+ proto := "tcp"
+
+ trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load trust key: %s", err)
+ }
+
+ knownHosts, err := LoadKeySetFile(knownHostsPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
+ }
+
+ allowedHosts, err := FilterByHosts(knownHosts, addr, false)
+ if err != nil {
+ return nil, fmt.Errorf("error filtering hosts: %s", err)
+ }
+
+ certPool, err := GenerateCACertPool(trustKey, allowedHosts)
+ if err != nil {
+		return nil, fmt.Errorf("could not create CA pool: %s", err)
+ }
+
+ tlsConfig.ServerName = serverName
+ tlsConfig.RootCAs = certPool
+
+ x509Cert, err := GenerateSelfSignedClientCert(trustKey)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
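+	// The dial below uses InsecureSkipVerify so the handshake succeeds even
+	// for an unknown host; the peer certificate is then verified manually
+	// against the generated CA pool (and optionally added to known hosts).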
+ tlsConfig.InsecureSkipVerify = true
+
+ testConn, err := tls.Dial(proto, addr, tlsConfig)
+ if err != nil {
+ return nil, fmt.Errorf("tls Handshake error: %s", err)
+ }
+
+ opts := x509.VerifyOptions{
+ Roots: tlsConfig.RootCAs,
+ CurrentTime: time.Now(),
+ DNSName: tlsConfig.ServerName,
+ Intermediates: x509.NewCertPool(),
+ }
+
+ certs := testConn.ConnectionState().PeerCertificates
+ for i, cert := range certs {
+ if i == 0 {
+ continue
+ }
+ opts.Intermediates.AddCert(cert)
+ }
+
+ if _, err := certs[0].Verify(opts); err != nil {
+ if _, ok := err.(x509.UnknownAuthorityError); ok {
+ if trustUnknownHosts {
+ pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("error extracting public key from cert: %s", err)
+ }
+
+ pubKey.AddExtendedField("hosts", []string{addr})
+
+ if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
+ return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
+ }
+ } else {
+ return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
+ }
+ }
+ }
+
+ testConn.Close()
+ tlsConfig.InsecureSkipVerify = false
+
+ return tlsConfig, nil
+}
+
+// joseBase64UrlEncode encodes the given data using the standard base64 url
+// encoding format but with all trailing '=' characters omitted in accordance
+// with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlEncode(b []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
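+// For example, joseBase64UrlEncode([]byte{1}) returns "AQ" (the standard
+// encoding "AQ==" with its padding stripped), and joseBase64UrlDecode("AQ")
+// re-appends "==" (since len("AQ")%4 == 2) before decoding back to []byte{1}.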
+func joseBase64UrlDecode(s string) ([]byte, error) {
+ s = strings.Replace(s, "\n", "", -1)
+ s = strings.Replace(s, " ", "", -1)
+ switch len(s) % 4 {
+ case 0:
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ default:
+ return nil, errors.New("illegal base64url string")
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
+
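+// keyIDEncode formats b as unpadded base32 and splits the result into
+// ':'-separated groups of four characters, e.g. "ABCD:EFGH:...".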
+func keyIDEncode(b []byte) string {
+ s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
+ var buf bytes.Buffer
+ var i int
+ for i = 0; i < len(s)/4-1; i++ {
+ start := i * 4
+ end := start + 4
+ buf.WriteString(s[start:end] + ":")
+ }
+ buf.WriteString(s[i*4:])
+ return buf.String()
+}
+
+func keyIDFromCryptoKey(pubKey PublicKey) string {
+ // Generate and return a 'libtrust' fingerprint of the public key.
+ // For an RSA key this should be:
+ // SHA256(DER encoded ASN1)
+ // Then truncated to 240 bits and encoded into 12 base32 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
+ if err != nil {
+ return ""
+ }
+ hasher := crypto.SHA256.New()
+ hasher.Write(derBytes)
+ return keyIDEncode(hasher.Sum(nil)[:30])
+}
+
+func stringFromMap(m map[string]interface{}, key string) (string, error) {
+ val, ok := m[key]
+ if !ok {
+ return "", fmt.Errorf("%q value not specified", key)
+ }
+
+ str, ok := val.(string)
+ if !ok {
+ return "", fmt.Errorf("%q value must be a string", key)
+ }
+ delete(m, key)
+
+ return str, nil
+}
+
+func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ curveByteLen := (curve.Params().BitSize + 7) >> 3
+
+ cBytes, err := joseBase64UrlDecode(cB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ cByteLength := len(cBytes)
+ if cByteLength != curveByteLen {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
+ }
+ return new(big.Int).SetBytes(cBytes), nil
+}
+
+func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ dBytes, err := joseBase64UrlDecode(dB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
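+	// For example, for P-256 bitlength(n-1) is 256, giving (256+7)>>3 = 32
+	// octets, while for P-521 it is 521, giving (521+7)>>3 = 66 octets.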
+ n := curve.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ dByteLength := len(dBytes)
+
+ if dByteLength != octetLength {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
+ }
+
+ return new(big.Int).SetBytes(dBytes), nil
+}
+
+func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
+ nBytes, err := joseBase64UrlDecode(nB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(nBytes), nil
+}
+
+func serializeRSAPublicExponentParam(e int) []byte {
+ // We MUST use the minimum number of octets to represent E.
+ // E is supposed to be 65537 for performance and security reasons
+ // and is what golang's rsa package generates, but it might be
+ // different if imported from some other generator.
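+	// For example, e = 65537 = 0x010001 is serialized as the three octets
+	// {0x01, 0x00, 0x01}.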
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(e))
+ var i int
+	// Stop at i == 3 so at least one octet is always returned and the index
+	// can never run past the end of the 4-byte buffer.
+	for i = 0; i < 3; i++ {
+ if buf[i] != 0 {
+ break
+ }
+ }
+ return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+ eBytes, err := joseBase64UrlDecode(eB64Url)
+ if err != nil {
+ return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ // Only the minimum number of bytes were used to represent E, but
+ // binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
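+	// For example, the canonical JWK exponent "AQAB" decodes to the three
+	// octets {0x01, 0x00, 0x01}, which are left-padded to four bytes and
+	// read back as 65537.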
+ byteLen := len(eBytes)
+ buf := make([]byte, 4-byteLen, 4)
+ eBytes = append(buf, eBytes...)
+
+ return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+ b64Url, err := stringFromMap(m, key)
+ if err != nil {
+ return nil, err
+ }
+
+ paramBytes, err := joseBase64UrlDecode(b64Url)
+ if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+ pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+ for k, v := range headers {
+ switch val := v.(type) {
+ case string:
+ pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// Non-encodable type: only the "hosts" list has a defined encoding.
+				return nil, fmt.Errorf("cannot encode []string value for PEM header %q", k)
+			}
+		default:
+			// Non-encodable type.
+			return nil, fmt.Errorf("cannot encode value of type %T for PEM header %q", v, k)
+		}
+ }
+
+ return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+ cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+ }
+
+ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ addPEMHeadersToKey(pemBlock, pubKey)
+
+ return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+ for key, value := range pemBlock.Headers {
+ var safeVal interface{}
+ if key == "hosts" {
+ safeVal = strings.Split(value, ",")
+ } else {
+ safeVal = value
+ }
+ pubKey.AddExtendedField(key, safeVal)
+ }
+}
diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE
new file mode 100644
index 000000000..9e4bd4dbe
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs
new file mode 100644
index 000000000..e26cd4fc8
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material,
+     including for purposes of Section 3(b); and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md
new file mode 100644
index 000000000..11cccd0a0
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/README.md
@@ -0,0 +1,77 @@
+# SpdyStream
+
+A multiplexed stream library using SPDY
+
+## Usage
+
+Client example (connecting to mirroring server without auth)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/docker/spdystream"
+ "net"
+ "net/http"
+)
+
+func main() {
+ conn, err := net.Dial("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, false)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.NoOpStreamHandler)
+ stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
+ if err != nil {
+ panic(err)
+ }
+
+ stream.Wait()
+
+ fmt.Fprint(stream, "Writing to stream")
+
+ buf := make([]byte, 25)
+ stream.Read(buf)
+ fmt.Println(string(buf))
+
+ stream.Close()
+}
+```
+
+Server example (mirroring server without auth)
+
+```go
+package main
+
+import (
+ "github.com/docker/spdystream"
+ "net"
+)
+
+func main() {
+ listener, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, true)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.MirrorStreamHandler)
+ }
+}
+```
+
+## Copyright and license
+
+Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
new file mode 100644
index 000000000..df27d1dd1
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -0,0 +1,959 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrInvalidStreamId = errors.New("Invalid stream id")
+	ErrTimeout           = errors.New("Timeout occurred")
+ ErrReset = errors.New("Stream reset")
+ ErrWriteClosedStream = errors.New("Write on closed stream")
+)
+
+const (
+ FRAME_WORKERS = 5
+ QUEUE_SIZE = 50
+)
+
+type StreamHandler func(stream *Stream)
+
+type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
+
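+// idleAwareFramer wraps a spdy.Framer and tracks an idle timeout: every
+// frame read or written resets the timer, and if the timer fires the
+// monitor goroutine resets all streams and closes the connection.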
+type idleAwareFramer struct {
+ f *spdy.Framer
+ conn *Connection
+ writeLock sync.Mutex
+ resetChan chan struct{}
+ setTimeoutLock sync.Mutex
+ setTimeoutChan chan time.Duration
+ timeout time.Duration
+}
+
+func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
+ iaf := &idleAwareFramer{
+ f: framer,
+ resetChan: make(chan struct{}, 2),
+ // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
+ // the same time the connection is being closed
+ setTimeoutChan: make(chan time.Duration, 1),
+ }
+ return iaf
+}
+
+func (i *idleAwareFramer) monitor() {
+ var (
+ timer *time.Timer
+ expired <-chan time.Time
+ resetChan = i.resetChan
+ setTimeoutChan = i.setTimeoutChan
+ )
+Loop:
+ for {
+ select {
+ case timeout := <-i.setTimeoutChan:
+ i.timeout = timeout
+ if timeout == 0 {
+ if timer != nil {
+ timer.Stop()
+ }
+ } else {
+ if timer == nil {
+ timer = time.NewTimer(timeout)
+ expired = timer.C
+ } else {
+ timer.Reset(timeout)
+ }
+ }
+ case <-resetChan:
+ if timer != nil && i.timeout > 0 {
+ timer.Reset(i.timeout)
+ }
+ case <-expired:
+ i.conn.streamCond.L.Lock()
+ streams := i.conn.streams
+ i.conn.streams = make(map[spdy.StreamId]*Stream)
+ i.conn.streamCond.Broadcast()
+ i.conn.streamCond.L.Unlock()
+ go func() {
+ for _, stream := range streams {
+ stream.resetStream()
+ }
+ i.conn.Close()
+ }()
+ case <-i.conn.closeChan:
+ if timer != nil {
+ timer.Stop()
+ }
+
+ // Start a goroutine to drain resetChan. This is needed because we've seen
+ // some unit tests with large numbers of goroutines get into a situation
+ // where resetChan fills up, at least 1 call to Write() is still trying to
+ // send to resetChan, the connection gets closed, and this case statement
+ // attempts to grab the write lock that Write() already has, causing a
+ // deadlock.
+ //
+ // See https://github.com/docker/spdystream/issues/49 for more details.
+ go func() {
+				for range resetChan {
+ }
+ }()
+
+ go func() {
+				for range setTimeoutChan {
+ }
+ }()
+
+ i.writeLock.Lock()
+ close(resetChan)
+ i.resetChan = nil
+ i.writeLock.Unlock()
+
+ i.setTimeoutLock.Lock()
+ close(i.setTimeoutChan)
+ i.setTimeoutChan = nil
+ i.setTimeoutLock.Unlock()
+
+ break Loop
+ }
+ }
+
+ // Drain resetChan
+	for range resetChan {
+ }
+}
+
+func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
+ i.writeLock.Lock()
+ defer i.writeLock.Unlock()
+ if i.resetChan == nil {
+ return io.EOF
+ }
+ err := i.f.WriteFrame(frame)
+ if err != nil {
+ return err
+ }
+
+ i.resetChan <- struct{}{}
+
+ return nil
+}
+
+func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
+ frame, err := i.f.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+
+ // resetChan should never be closed since it is only closed
+ // when the connection has closed its closeChan. This closure
+ // only occurs after all Reads have finished
+ // TODO (dmcgowan): refactor relationship into connection
+ i.resetChan <- struct{}{}
+
+ return frame, nil
+}
+
+func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
+ i.setTimeoutLock.Lock()
+ defer i.setTimeoutLock.Unlock()
+
+ if i.setTimeoutChan == nil {
+ return
+ }
+
+ i.setTimeoutChan <- timeout
+}
+
+type Connection struct {
+ conn net.Conn
+ framer *idleAwareFramer
+
+ closeChan chan bool
+ goneAway bool
+ lastStreamChan chan<- *Stream
+ goAwayTimeout time.Duration
+ closeTimeout time.Duration
+
+ streamLock *sync.RWMutex
+ streamCond *sync.Cond
+ streams map[spdy.StreamId]*Stream
+
+ nextIdLock sync.Mutex
+ receiveIdLock sync.Mutex
+ nextStreamId spdy.StreamId
+ receivedStreamId spdy.StreamId
+
+ pingIdLock sync.Mutex
+ pingId uint32
+ pingChans map[uint32]chan error
+
+ shutdownLock sync.Mutex
+ shutdownChan chan error
+ hasShutdown bool
+
+ // for testing https://github.com/docker/spdystream/pull/56
+ dataFrameHandler func(*spdy.DataFrame) error
+}
+
+// NewConnection creates a new spdy connection from an existing
+// network connection.
+func NewConnection(conn net.Conn, server bool) (*Connection, error) {
+ framer, framerErr := spdy.NewFramer(conn, conn)
+ if framerErr != nil {
+ return nil, framerErr
+ }
+ idleAwareFramer := newIdleAwareFramer(framer)
+ var sid spdy.StreamId
+ var rid spdy.StreamId
+ var pid uint32
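+	// Per the SPDY protocol, stream and ping ids initiated by the server
+	// are even while those initiated by the client are odd; the received
+	// id tracks the peer's namespace.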
+ if server {
+ sid = 2
+ rid = 1
+ pid = 2
+ } else {
+ sid = 1
+ rid = 2
+ pid = 1
+ }
+
+ streamLock := new(sync.RWMutex)
+ streamCond := sync.NewCond(streamLock)
+
+ session := &Connection{
+ conn: conn,
+ framer: idleAwareFramer,
+
+ closeChan: make(chan bool),
+ goAwayTimeout: time.Duration(0),
+ closeTimeout: time.Duration(0),
+
+ streamLock: streamLock,
+ streamCond: streamCond,
+ streams: make(map[spdy.StreamId]*Stream),
+ nextStreamId: sid,
+ receivedStreamId: rid,
+
+ pingId: pid,
+ pingChans: make(map[uint32]chan error),
+
+ shutdownChan: make(chan error),
+ }
+ session.dataFrameHandler = session.handleDataFrame
+ idleAwareFramer.conn = session
+ go idleAwareFramer.monitor()
+
+ return session, nil
+}
+
+// Ping sends a ping frame across the connection and
+// returns the response time
+func (s *Connection) Ping() (time.Duration, error) {
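+	// Ping ids keep this side's parity (see NewConnection) and wrap around
+	// before overflowing the 31-bit id space.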
+	s.pingIdLock.Lock()
+	pid := s.pingId
+	if s.pingId > 0x7ffffffe {
+		s.pingId = s.pingId - 0x7ffffffe
+	} else {
+		s.pingId = s.pingId + 2
+	}
+	s.pingIdLock.Unlock()
+ pingChan := make(chan error)
+ s.pingChans[pid] = pingChan
+ defer delete(s.pingChans, pid)
+
+ frame := &spdy.PingFrame{Id: pid}
+ startTime := time.Now()
+ writeErr := s.framer.WriteFrame(frame)
+ if writeErr != nil {
+ return time.Duration(0), writeErr
+ }
+ select {
+ case <-s.closeChan:
+ return time.Duration(0), errors.New("connection closed")
+ case err, ok := <-pingChan:
+ if ok && err != nil {
+ return time.Duration(0), err
+ }
+ break
+ }
+	return time.Since(startTime), nil
+}
+
+// Serve handles frames sent from the server, including reply frames
+// which are needed to fully initiate connections. Both clients and servers
+// should call Serve in a separate goroutine before creating streams.
+func (s *Connection) Serve(newHandler StreamHandler) {
+ // use a WaitGroup to wait for all frames to be drained after receiving
+ // go-away.
+ var wg sync.WaitGroup
+
+	// Partition queues to ensure stream frames are handled
+ // by the same worker, ensuring order is maintained
+ frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
+ for i := 0; i < FRAME_WORKERS; i++ {
+ frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
+
+ // Ensure frame queue is drained when connection is closed
+ go func(frameQueue *PriorityFrameQueue) {
+ <-s.closeChan
+ frameQueue.Drain()
+ }(frameQueues[i])
+
+ wg.Add(1)
+ go func(frameQueue *PriorityFrameQueue) {
+ // let the WaitGroup know this worker is done
+ defer wg.Done()
+
+ s.frameHandler(frameQueue, newHandler)
+ }(frameQueues[i])
+ }
+
+ var (
+ partitionRoundRobin int
+ goAwayFrame *spdy.GoAwayFrame
+ )
+Loop:
+ for {
+ readFrame, err := s.framer.ReadFrame()
+ if err != nil {
+ if err != io.EOF {
+ debugMessage("frame read error: %s", err)
+ } else {
+ debugMessage("(%p) EOF received", s)
+ }
+ break
+ }
+ var priority uint8
+ var partition int
+ switch frame := readFrame.(type) {
+ case *spdy.SynStreamFrame:
+ if s.checkStreamFrame(frame) {
+ priority = frame.Priority
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
+ s.addStreamFrame(frame)
+ } else {
+ debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
+ continue
+ }
+ case *spdy.SynReplyFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.DataFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.RstStreamFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.HeadersFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.PingFrame:
+ priority = 0
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ case *spdy.GoAwayFrame:
+ // hold on to the go away frame and exit the loop
+ goAwayFrame = frame
+ break Loop
+ default:
+ priority = 7
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ }
+ frameQueues[partition].Push(readFrame, priority)
+ }
+ close(s.closeChan)
+
+ // wait for all frame handler workers to indicate they've drained their queues
+ // before handling the go away frame
+ wg.Wait()
+
+ if goAwayFrame != nil {
+ s.handleGoAwayFrame(goAwayFrame)
+ }
+
+ // now it's safe to close remote channels and empty s.streams
+ s.streamCond.L.Lock()
+ // notify streams that they're now closed, which will
+ // unblock any stream Read() calls
+ for _, stream := range s.streams {
+ stream.closeRemoteChannels()
+ }
+ s.streams = make(map[spdy.StreamId]*Stream)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
+ for {
+ popFrame := frameQueue.Pop()
+ if popFrame == nil {
+ return
+ }
+
+ var frameErr error
+ switch frame := popFrame.(type) {
+ case *spdy.SynStreamFrame:
+ frameErr = s.handleStreamFrame(frame, newHandler)
+ case *spdy.SynReplyFrame:
+ frameErr = s.handleReplyFrame(frame)
+ case *spdy.DataFrame:
+ frameErr = s.dataFrameHandler(frame)
+ case *spdy.RstStreamFrame:
+ frameErr = s.handleResetFrame(frame)
+ case *spdy.HeadersFrame:
+ frameErr = s.handleHeaderFrame(frame)
+ case *spdy.PingFrame:
+ frameErr = s.handlePingFrame(frame)
+ case *spdy.GoAwayFrame:
+ frameErr = s.handleGoAwayFrame(frame)
+ default:
+ frameErr = fmt.Errorf("unhandled frame type: %T", frame)
+ }
+
+ if frameErr != nil {
+ debugMessage("frame handling error: %s", frameErr)
+ }
+ }
+}
+
+func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
+ stream, streamOk := s.getStream(streamId)
+ if !streamOk {
+ return 7
+ }
+ return stream.priority
+}
+
+func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
+ var parent *Stream
+ if frame.AssociatedToStreamId != spdy.StreamId(0) {
+ parent, _ = s.getStream(frame.AssociatedToStreamId)
+ }
+
+ stream := &Stream{
+ streamId: frame.StreamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: frame.Headers,
+ finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
+ replyCond: sync.NewCond(new(sync.Mutex)),
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ priority: frame.Priority,
+ }
+ if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
+ stream.closeRemoteChannels()
+ }
+
+ s.addStream(stream)
+}
+
+// checkStreamFrame checks to see if a stream frame is allowed.
+// If the stream is invalid, then a reset frame with protocol error
+// will be returned.
+func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
+ s.receiveIdLock.Lock()
+ defer s.receiveIdLock.Unlock()
+ if s.goneAway {
+ return false
+ }
+ validationErr := s.validateStreamId(frame.StreamId)
+ if validationErr != nil {
+ go func() {
+ resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
+ if resetErr != nil {
+ debugMessage("reset error: %s", resetErr)
+ }
+ }()
+ return false
+ }
+ return true
+}
+
+func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
+ stream, ok := s.getStream(frame.StreamId)
+ if !ok {
+ return fmt.Errorf("Missing stream: %d", frame.StreamId)
+ }
+
+ newHandler(stream)
+
+ return nil
+}
+
+func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
+ debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("Reply frame gone away for %d", frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if stream.replied {
+ // Stream has already received reply
+ return nil
+ }
+ stream.replied = true
+
+ // TODO Check for error
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ close(stream.startChan)
+
+ return nil
+}
+
+func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already been removed
+ return nil
+ }
+ s.removeStream(stream)
+ stream.closeRemoteChannels()
+
+ if !stream.replied {
+ stream.replied = true
+ stream.startChan <- ErrReset
+ close(stream.startChan)
+ }
+
+ stream.finishLock.Lock()
+ stream.finished = true
+ stream.finishLock.Unlock()
+
+ return nil
+}
+
+func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ // TODO limit headers while not blocking (use buffered chan or goroutine?)
+ select {
+ case <-stream.closeChan:
+ return nil
+ case stream.headerChan <- frame.Headers:
+ }
+
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ return nil
+}
+
+func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
+ debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
+ if len(frame.Data) > 0 {
+ stream.dataLock.RLock()
+ select {
+ case <-stream.closeChan:
+ debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
+ case stream.dataChan <- frame.Data:
+ debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
+ }
+ stream.dataLock.RUnlock()
+ }
+ if (frame.Flags & spdy.DataFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+ return nil
+}
+
+func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
+ if s.pingId&0x01 != frame.Id&0x01 {
+ return s.framer.WriteFrame(frame)
+ }
+ pingChan, pingOk := s.pingChans[frame.Id]
+ if pingOk {
+ close(pingChan)
+ }
+ return nil
+}
+
+func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
+ debugMessage("(%p) Go away received", s)
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ if s.lastStreamChan != nil {
+ stream, _ := s.getStream(frame.LastGoodStreamId)
+ go func() {
+ s.lastStreamChan <- stream
+ }()
+ }
+
+ // Do not block frame handler waiting for closure
+ go s.shutdown(s.goAwayTimeout)
+
+ return nil
+}
+
+func (s *Connection) remoteStreamFinish(stream *Stream) {
+ stream.closeRemoteChannels()
+
+ stream.finishLock.Lock()
+ if stream.finished {
+ // Stream is fully closed, cleanup
+ s.removeStream(stream)
+ }
+ stream.finishLock.Unlock()
+}
+
+// CreateStream creates a new spdy stream using the parameters for
+// creating the stream frame. The stream frame will be sent upon
+// calling this function, however this function does not wait for
+// the reply frame. If waiting for the reply is desired, use
+// the stream Wait or WaitTimeout function on the stream returned
+// by this function.
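+//
+// A typical call waits for the reply before using the stream:
+//
+//	stream, err := conn.CreateStream(http.Header{}, nil, false)
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := stream.Wait(); err != nil {
+//		// handle error
+//	}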
+func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
+ // MUST synchronize stream creation (all the way to writing the frame)
+ // as stream IDs **MUST** increase monotonically.
+ s.nextIdLock.Lock()
+ defer s.nextIdLock.Unlock()
+
+ streamId := s.getNextStreamId()
+ if streamId == 0 {
+		return nil, fmt.Errorf("unable to get new stream id")
+ }
+
+ stream := &Stream{
+ streamId: streamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: headers,
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ }
+
+ debugMessage("(%p) (%p) Create stream", s, stream)
+
+ s.addStream(stream)
+
+ return stream, s.sendStream(stream, fin)
+}
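+
+// Example (illustrative sketch, not part of the original API docs):
+// opening a stream and waiting for the reply. Assumes conn is a
+// *Connection and that error handling is elided.
+//
+//	stream, err := conn.CreateStream(http.Header{}, nil, false)
+//	if err != nil {
+//		return err
+//	}
+//	if err := stream.Wait(); err != nil {
+//		return err // no reply received
+//	}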
+
+func (s *Connection) shutdown(closeTimeout time.Duration) {
+ // TODO Ensure this isn't called multiple times
+ s.shutdownLock.Lock()
+ if s.hasShutdown {
+ s.shutdownLock.Unlock()
+ return
+ }
+ s.hasShutdown = true
+ s.shutdownLock.Unlock()
+
+ var timeout <-chan time.Time
+ if closeTimeout > time.Duration(0) {
+ timeout = time.After(closeTimeout)
+ }
+ streamsClosed := make(chan bool)
+
+ go func() {
+ s.streamCond.L.Lock()
+ for len(s.streams) > 0 {
+ debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
+ s.streamCond.Wait()
+ }
+ s.streamCond.L.Unlock()
+ close(streamsClosed)
+ }()
+
+ var err error
+ select {
+ case <-streamsClosed:
+ // No active streams, close should be safe
+ err = s.conn.Close()
+ case <-timeout:
+ // Force ungraceful close
+ err = s.conn.Close()
+ // Wait for cleanup to clear active streams
+ <-streamsClosed
+ }
+
+ if err != nil {
+ duration := 10 * time.Minute
+ time.AfterFunc(duration, func() {
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ debugMessage("Unhandled close error after %s: %s", duration, err)
+ }
+ default:
+ }
+ })
+ s.shutdownChan <- err
+ }
+ close(s.shutdownChan)
+}
+
+// Close closes the spdy connection by sending a GOAWAY frame and initiating shutdown.
+func (s *Connection) Close() error {
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ var lastStreamId spdy.StreamId
+ if s.receivedStreamId > 2 {
+ lastStreamId = s.receivedStreamId - 2
+ }
+
+ goAwayFrame := &spdy.GoAwayFrame{
+ LastGoodStreamId: lastStreamId,
+ Status: spdy.GoAwayOK,
+ }
+
+ err := s.framer.WriteFrame(goAwayFrame)
+ if err != nil {
+ return err
+ }
+
+ go s.shutdown(s.closeTimeout)
+
+ return nil
+}
+
+// CloseWait closes the connection and waits for shutdown
+// to finish. Note that the underlying network connection
+// is not closed until the end of shutdown.
+func (s *Connection) CloseWait() error {
+ closeErr := s.Close()
+ if closeErr != nil {
+ return closeErr
+ }
+ shutdownErr, ok := <-s.shutdownChan
+ if ok {
+ return shutdownErr
+ }
+ return nil
+}
+
+// Wait waits for the connection to finish shutdown or for
+// the wait timeout duration to expire. This needs to be
+// called either after Close has been called or after a GOAWAY
+// frame has been received. If the wait timeout is 0, this function
+// will block until shutdown finishes. If Wait is never called
+// and a shutdown error occurs, that error will be logged as an
+// unhandled error.
+func (s *Connection) Wait(waitTimeout time.Duration) error {
+ var timeout <-chan time.Time
+ if waitTimeout > time.Duration(0) {
+ timeout = time.After(waitTimeout)
+ }
+
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ return err
+ }
+ case <-timeout:
+ return ErrTimeout
+ }
+ return nil
+}
+
+// NotifyClose registers a channel to be notified when the remote
+// peer indicates connection closure. The last stream to be
+// received by the remote will be sent on the channel. The notify
+// timeout determines the duration between the GOAWAY being
+// received and the connection being closed.
+func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
+ s.goAwayTimeout = timeout
+ s.lastStreamChan = c
+}
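+
+// Example (illustrative sketch): registering for close notification.
+// Assumes conn is a *Connection; the received stream may be nil if the
+// last good stream id is unknown on this side.
+//
+//	closed := make(chan *Stream)
+//	conn.NotifyClose(closed, 10*time.Second)
+//	go func() {
+//		last := <-closed // last stream received by the remote
+//		_ = last
+//	}()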
+
+// SetCloseTimeout sets the amount of time Close will wait for
+// streams to finish before terminating the underlying network
+// connection. Setting the timeout to 0 will cause Close to
+// wait forever, which is the default.
+func (s *Connection) SetCloseTimeout(timeout time.Duration) {
+ s.closeTimeout = timeout
+}
+
+// SetIdleTimeout sets the amount of time the connection may sit idle before
+// it is forcefully terminated.
+func (s *Connection) SetIdleTimeout(timeout time.Duration) {
+ s.framer.setIdleTimeout(timeout)
+}
+
+func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ headerFrame := &spdy.HeadersFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(headerFrame)
+}
+
+func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ replyFrame := &spdy.SynReplyFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(replyFrame)
+}
+
+func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: streamId,
+ Status: status,
+ }
+
+ return s.framer.WriteFrame(resetFrame)
+}
+
+func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
+ return s.sendResetFrame(status, stream.streamId)
+}
+
+func (s *Connection) sendStream(stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ stream.finished = true
+ }
+
+ var parentId spdy.StreamId
+ if stream.parent != nil {
+ parentId = stream.parent.streamId
+ }
+
+ streamFrame := &spdy.SynStreamFrame{
+ StreamId: spdy.StreamId(stream.streamId),
+ AssociatedToStreamId: spdy.StreamId(parentId),
+ Headers: stream.headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(streamFrame)
+}
+
+// getNextStreamId returns the next sequential id.
+// Every call should produce a unique value, or 0 once the id space is exhausted.
+func (s *Connection) getNextStreamId() spdy.StreamId {
+ sid := s.nextStreamId
+ if sid > 0x7fffffff {
+ return 0
+ }
+ s.nextStreamId = s.nextStreamId + 2
+ return sid
+}
+
+// PeekNextStreamId returns the next sequential id without consuming it.
+func (s *Connection) PeekNextStreamId() spdy.StreamId {
+	return s.nextStreamId
+}
+
+func (s *Connection) validateStreamId(rid spdy.StreamId) error {
+ if rid > 0x7fffffff || rid < s.receivedStreamId {
+ return ErrInvalidStreamId
+ }
+ s.receivedStreamId = rid + 2
+ return nil
+}
+
+func (s *Connection) addStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ s.streams[stream.streamId] = stream
+ debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) removeStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ delete(s.streams, stream.streamId)
+ debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
+ s.streamLock.RLock()
+ stream, ok = s.streams[streamId]
+ s.streamLock.RUnlock()
+ return
+}
+
+// FindStream looks up the given stream id, waiting for the
+// stream to be found if necessary, and returns nil if the
+// stream id is no longer valid.
+func (s *Connection) FindStream(streamId uint32) *Stream {
+ var stream *Stream
+ var ok bool
+ s.streamCond.L.Lock()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
+ for !ok && streamId >= uint32(s.receivedStreamId) {
+ s.streamCond.Wait()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ }
+ s.streamCond.L.Unlock()
+ return stream
+}
+
+func (s *Connection) CloseChan() <-chan bool {
+ return s.closeChan
+}
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
new file mode 100644
index 000000000..b59fa5fdc
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/handlers.go
@@ -0,0 +1,38 @@
+package spdystream
+
+import (
+ "io"
+ "net/http"
+)
+
+// MirrorStreamHandler mirrors all streams.
+func MirrorStreamHandler(stream *Stream) {
+ replyErr := stream.SendReply(http.Header{}, false)
+ if replyErr != nil {
+ return
+ }
+
+ go func() {
+ io.Copy(stream, stream)
+ stream.Close()
+ }()
+ go func() {
+ for {
+ header, receiveErr := stream.ReceiveHeader()
+ if receiveErr != nil {
+ return
+ }
+ sendErr := stream.SendHeader(header, false)
+ if sendErr != nil {
+ return
+ }
+ }
+ }()
+}
+
+// NoOpStreamHandler does nothing when a stream connects. It is most
+// likely used with RejectAuthHandler, which will not allow any
+// streams to make it to the stream handler.
+func NoOpStreamHandler(stream *Stream) {
+ stream.SendReply(http.Header{}, false)
+}
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
new file mode 100644
index 000000000..fc8582b5c
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/priority.go
@@ -0,0 +1,98 @@
+package spdystream
+
+import (
+ "container/heap"
+ "sync"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+type prioritizedFrame struct {
+ frame spdy.Frame
+ priority uint8
+ insertId uint64
+}
+
+type frameQueue []*prioritizedFrame
+
+func (fq frameQueue) Len() int {
+ return len(fq)
+}
+
+func (fq frameQueue) Less(i, j int) bool {
+ if fq[i].priority == fq[j].priority {
+ return fq[i].insertId < fq[j].insertId
+ }
+ return fq[i].priority < fq[j].priority
+}
+
+func (fq frameQueue) Swap(i, j int) {
+ fq[i], fq[j] = fq[j], fq[i]
+}
+
+func (fq *frameQueue) Push(x interface{}) {
+ *fq = append(*fq, x.(*prioritizedFrame))
+}
+
+func (fq *frameQueue) Pop() interface{} {
+ old := *fq
+ n := len(old)
+ *fq = old[0 : n-1]
+ return old[n-1]
+}
+
+// PriorityFrameQueue is a bounded priority queue of spdy frames;
+// frames with lower priority values are popped first.
+type PriorityFrameQueue struct {
+ queue *frameQueue
+ c *sync.Cond
+ size int
+ nextInsertId uint64
+ drain bool
+}
+
+// NewPriorityFrameQueue creates a PriorityFrameQueue holding at most size frames.
+func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
+ queue := make(frameQueue, 0, size)
+ heap.Init(&queue)
+
+ return &PriorityFrameQueue{
+ queue: &queue,
+ size: size,
+ c: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+// Push enqueues a frame with the given priority, blocking while the
+// queue is full.
+func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() >= q.size {
+ q.c.Wait()
+ }
+ pFrame := &prioritizedFrame{
+ frame: frame,
+ priority: priority,
+ insertId: q.nextInsertId,
+ }
+ q.nextInsertId = q.nextInsertId + 1
+ heap.Push(q.queue, pFrame)
+ q.c.Signal()
+}
+
+// Pop dequeues the highest-priority frame, blocking while the queue is
+// empty; it returns nil once the queue has been drained.
+func (q *PriorityFrameQueue) Pop() spdy.Frame {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() == 0 {
+ if q.drain {
+ return nil
+ }
+ q.c.Wait()
+ }
+ frame := heap.Pop(q.queue).(*prioritizedFrame).frame
+ q.c.Signal()
+ return frame
+}
+
+// Drain marks the queue as draining, unblocking any Pop calls waiting
+// on an empty queue.
+func (q *PriorityFrameQueue) Drain() {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ q.drain = true
+ q.c.Broadcast()
+}
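+
+// Example (illustrative sketch): lower priority values are popped
+// first; equal priorities pop in insertion order. frameA, frameB and
+// frameC are placeholder spdy.Frame values.
+//
+//	q := NewPriorityFrameQueue(64)
+//	q.Push(frameA, 1)
+//	q.Push(frameB, 0)
+//	q.Push(frameC, 1)
+//	q.Pop() // frameB (priority 0)
+//	q.Pop() // frameA (priority 1, inserted before frameC)
+//	q.Pop() // frameC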
diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go
new file mode 100644
index 000000000..5a5ff0e14
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
+var headerDictionary = []byte{
+ 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
+ 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
+ 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
+ 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
+ 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
+ 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
+ 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
+ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
+ 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
+ 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
+ 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
+ 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
+ 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
+ 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
+ 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
+ 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
+ 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
+ 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
+ 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
+ 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
+ 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
+ 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
+ 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
+ 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
+ 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+ 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
+ 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
+ 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
+ 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
+ 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
+ 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
+ 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
+ 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
+ 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
+ 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
+ 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
+ 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
+ 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
+ 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
+ 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
+ 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
+ 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
+ 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
+ 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
+ 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
+ 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
+ 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
+ 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
+ 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
+ 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
+ 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
+ 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
+ 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
+ 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
+ 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
+ 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
+ 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
+ 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
+ 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
+ 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
+ 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
+ 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
+ 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
+ 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
+ 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
+ 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
+ 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
+ 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
+ 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
+ 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
+ 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
+ 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
+ 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
+ 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
+ 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
+ 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
+ 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
+ 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
+ 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
+ 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
+ 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
+ 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
+ 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
+ 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
+ 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
+ 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
+ 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
+ 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
+ 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
+ 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
+ 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
+ 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
+ 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
+ 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
+ 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
+ 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
+ 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
+ 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
+ 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
+ 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
+ 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
+ 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
+ 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
+ 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
+ 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go
new file mode 100644
index 000000000..9359a9501
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/read.go
@@ -0,0 +1,348 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "compress/zlib"
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readSynStreamFrame(h, frame)
+}
+
+func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readSynReplyFrame(h, frame)
+}
+
+func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+ return err
+ }
+ if frame.Status == 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ var numSettings uint32
+ if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
+ return err
+ }
+ frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
+ for i := uint32(0); i < numSettings; i++ {
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
+ return err
+ }
+ frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
+ frame.FlagIdValues[i].Id &= 0xffffff
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
+ return err
+ }
+ if frame.Id == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, StreamId(frame.Id)}
+ }
+ return nil
+}
+
+func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
+ return err
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+ }
+ if frame.CFHeader.length != 8 {
+ return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readHeadersFrame(h, frame)
+}
+
+func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if frame.CFHeader.length != 8 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
+ ctor, ok := cframeCtor[frameType]
+ if !ok {
+ return nil, &Error{Err: InvalidControlFrame}
+ }
+ return ctor(), nil
+}
+
+var cframeCtor = map[ControlFrameType]func() controlFrame{
+ TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
+ TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
+ TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
+ TypeSettings: func() controlFrame { return new(SettingsFrame) },
+ TypePing: func() controlFrame { return new(PingFrame) },
+ TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
+ TypeHeaders: func() controlFrame { return new(HeadersFrame) },
+ TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
+}
+
+func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
+ if f.headerDecompressor != nil {
+ f.headerReader.N = payloadSize
+ return nil
+ }
+ f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
+ decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
+ if err != nil {
+ return err
+ }
+ f.headerDecompressor = decompressor
+ return nil
+}
+
+// ReadFrame reads SPDY-encoded data and returns a decompressed Frame.
+func (f *Framer) ReadFrame() (Frame, error) {
+ var firstWord uint32
+ if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
+ return nil, err
+ }
+ if firstWord&0x80000000 != 0 {
+ frameType := ControlFrameType(firstWord & 0xffff)
+ version := uint16(firstWord >> 16 & 0x7fff)
+ return f.parseControlFrame(version, frameType)
+ }
+ return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
+}
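+
+// For reference, the first word parsed above is laid out as follows
+// (big endian), matching the checks in ReadFrame:
+//
+//	control frame: 1 bit control flag (1) | 15 bit version | 16 bit type
+//	data frame:    1 bit control flag (0) | 31 bit stream id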
+
+func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
+ var length uint32
+ if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ flags := ControlFlags((length & 0xff000000) >> 24)
+ length &= 0xffffff
+ header := ControlFrameHeader{version, frameType, flags, length}
+ cframe, err := newControlFrame(frameType)
+ if err != nil {
+ return nil, err
+ }
+ if err = cframe.read(header, f); err != nil {
+ return nil, err
+ }
+ return cframe, nil
+}
+
+func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
+ var numHeaders uint32
+ if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
+ return nil, err
+ }
+ var e error
+ h := make(http.Header, int(numHeaders))
+ for i := 0; i < int(numHeaders); i++ {
+ var length uint32
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ nameBytes := make([]byte, length)
+ if _, err := io.ReadFull(r, nameBytes); err != nil {
+ return nil, err
+ }
+ name := string(nameBytes)
+ if name != strings.ToLower(name) {
+ e = &Error{UnlowercasedHeaderName, streamId}
+ name = strings.ToLower(name)
+ }
+ if h[name] != nil {
+ e = &Error{DuplicateHeaders, streamId}
+ }
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ value := make([]byte, length)
+ if _, err := io.ReadFull(r, value); err != nil {
+ return nil, err
+ }
+ valueList := strings.Split(string(value), headerValueSeparator)
+ for _, v := range valueList {
+ h.Add(name, v)
+ }
+ }
+ if e != nil {
+ return h, e
+ }
+ return h, nil
+}
+
+func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
+ return err
+ }
+ if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
+ return err
+ }
+ frame.Priority >>= 5
+ if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 10))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ for h := range frame.Headers {
+ if invalidReqHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ for h := range frame.Headers {
+ if invalidRespHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ var invalidHeaders map[string]bool
+ if frame.StreamId%2 == 0 {
+ invalidHeaders = invalidReqHeaders
+ } else {
+ invalidHeaders = invalidRespHeaders
+ }
+ for h := range frame.Headers {
+ if invalidHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
+ var length uint32
+ if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ var frame DataFrame
+ frame.StreamId = streamId
+ frame.Flags = DataFlags(length >> 24)
+ length &= 0xffffff
+ frame.Data = make([]byte, length)
+ if _, err := io.ReadFull(f.r, frame.Data); err != nil {
+ return nil, err
+ }
+ if frame.StreamId == 0 {
+ return nil, &Error{ZeroStreamId, 0}
+ }
+ return &frame, nil
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go
new file mode 100644
index 000000000..7b6ee9c6f
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/types.go
@@ -0,0 +1,275 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spdy implements the SPDY protocol (currently SPDY/3), described in
+// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
+package spdy
+
+import (
+ "bytes"
+ "compress/zlib"
+ "io"
+ "net/http"
+)
+
+// Version is the protocol version number that this package implements.
+const Version = 3
+
+// ControlFrameType stores the type field in a control frame header.
+type ControlFrameType uint16
+
+const (
+ TypeSynStream ControlFrameType = 0x0001
+ TypeSynReply = 0x0002
+ TypeRstStream = 0x0003
+ TypeSettings = 0x0004
+ TypePing = 0x0006
+ TypeGoAway = 0x0007
+ TypeHeaders = 0x0008
+ TypeWindowUpdate = 0x0009
+)
+
+// ControlFlags are the flags that can be set on a control frame.
+type ControlFlags uint8
+
+const (
+ ControlFlagFin ControlFlags = 0x01
+ ControlFlagUnidirectional = 0x02
+ ControlFlagSettingsClearSettings = 0x01
+)
+
+// DataFlags are the flags that can be set on a data frame.
+type DataFlags uint8
+
+const (
+ DataFlagFin DataFlags = 0x01
+)
+
+// MaxDataLength is the maximum number of bytes that can be stored in one frame.
+const MaxDataLength = 1<<24 - 1
+
+// headerValueSeparator separates multiple header values.
+const headerValueSeparator = "\x00"
+
+// Frame is a single SPDY frame in its unpacked in-memory representation. Use
+// Framer to read and write it.
+type Frame interface {
+ write(f *Framer) error
+}
+
+// ControlFrameHeader contains all the fields in a control frame header,
+// in its unpacked in-memory representation.
+type ControlFrameHeader struct {
+ // Note, high bit is the "Control" bit.
+ version uint16 // spdy version number
+ frameType ControlFrameType
+ Flags ControlFlags
+ length uint32 // length of data field
+}
+
+type controlFrame interface {
+ Frame
+ read(h ControlFrameHeader, f *Framer) error
+}
+
+// StreamId represents a 31-bit value identifying the stream.
+type StreamId uint32
+
+// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
+// frame.
+type SynStreamFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
+ Priority uint8 // priority of this frame (3-bit)
+ Slot uint8 // index in the server's credential vector of the client certificate
+ Headers http.Header
+}
+
+// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
+type SynReplyFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Headers http.Header
+}
+
+// RstStreamStatus represents the status that led to a RST_STREAM.
+type RstStreamStatus uint32
+
+const (
+ ProtocolError RstStreamStatus = iota + 1
+ InvalidStream
+ RefusedStream
+ UnsupportedVersion
+ Cancel
+ InternalError
+ FlowControlError
+ StreamInUse
+ StreamAlreadyClosed
+ InvalidCredentials
+ FrameTooLarge
+)
+
+// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
+// frame.
+type RstStreamFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Status RstStreamStatus
+}
+
+// SettingsFlag represents a flag in a SETTINGS frame.
+type SettingsFlag uint8
+
+const (
+ FlagSettingsPersistValue SettingsFlag = 0x1
+ FlagSettingsPersisted = 0x2
+)
+
+// SettingsId represents the id of an id/value pair in a SETTINGS frame.
+type SettingsId uint32
+
+const (
+ SettingsUploadBandwidth SettingsId = iota + 1
+ SettingsDownloadBandwidth
+ SettingsRoundTripTime
+ SettingsMaxConcurrentStreams
+ SettingsCurrentCwnd
+ SettingsDownloadRetransRate
+ SettingsInitialWindowSize
+ SettingsClientCretificateVectorSize
+)
+
+// SettingsFlagIdValue is the unpacked, in-memory representation of the
+// combined flag/id/value for a setting in a SETTINGS frame.
+type SettingsFlagIdValue struct {
+ Flag SettingsFlag
+ Id SettingsId
+ Value uint32
+}
+
+// SettingsFrame is the unpacked, in-memory representation of a SPDY
+// SETTINGS frame.
+type SettingsFrame struct {
+ CFHeader ControlFrameHeader
+ FlagIdValues []SettingsFlagIdValue
+}
+
+// PingFrame is the unpacked, in-memory representation of a PING frame.
+type PingFrame struct {
+ CFHeader ControlFrameHeader
+ Id uint32 // unique id for this ping, from server is even, from client is odd.
+}
+
+// GoAwayStatus represents the status in a GoAwayFrame.
+type GoAwayStatus uint32
+
+const (
+ GoAwayOK GoAwayStatus = iota
+ GoAwayProtocolError
+ GoAwayInternalError
+)
+
+// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
+type GoAwayFrame struct {
+ CFHeader ControlFrameHeader
+ LastGoodStreamId StreamId // last stream id which was accepted by sender
+ Status GoAwayStatus
+}
+
+// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
+type HeadersFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Headers http.Header
+}
+
+// WindowUpdateFrame is the unpacked, in-memory representation of a
+// WINDOW_UPDATE frame.
+type WindowUpdateFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ DeltaWindowSize uint32 // additional number of bytes to existing window size
+}
+
+// TODO: Implement credential frame and related methods.
+
+// DataFrame is the unpacked, in-memory representation of a DATA frame.
+type DataFrame struct {
+ // Note, high bit is the "Control" bit. Should be 0 for data frames.
+ StreamId StreamId
+ Flags DataFlags
+ Data []byte // payload data of this frame
+}
+
+// ErrorCode is a SPDY-specific error code.
+type ErrorCode string
+
+const (
+ UnlowercasedHeaderName ErrorCode = "header was not lowercased"
+ DuplicateHeaders = "multiple headers with same name"
+ WrongCompressedPayloadSize = "compressed payload size was incorrect"
+ UnknownFrameType = "unknown frame type"
+ InvalidControlFrame = "invalid control frame"
+ InvalidDataFrame = "invalid data frame"
+ InvalidHeaderPresent = "frame contained invalid header"
+ ZeroStreamId = "stream id zero is disallowed"
+)
+
+// Error contains both the type of error and additional values. StreamId is 0
+// if Error is not associated with a stream.
+type Error struct {
+ Err ErrorCode
+ StreamId StreamId
+}
+
+func (e *Error) Error() string {
+ return string(e.Err)
+}
+
+var invalidReqHeaders = map[string]bool{
+ "Connection": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Proxy-Connection": true,
+ "Transfer-Encoding": true,
+}
+
+var invalidRespHeaders = map[string]bool{
+ "Connection": true,
+ "Keep-Alive": true,
+ "Proxy-Connection": true,
+ "Transfer-Encoding": true,
+}
+
+// Framer handles serializing/deserializing SPDY frames, including compressing/
+// decompressing payloads.
+type Framer struct {
+ headerCompressionDisabled bool
+ w io.Writer
+ headerBuf *bytes.Buffer
+ headerCompressor *zlib.Writer
+ r io.Reader
+ headerReader io.LimitedReader
+ headerDecompressor io.ReadCloser
+}
+
+// NewFramer allocates a new Framer for a given SPDY connection, represented by
+// an io.Writer and io.Reader. Note that Framer will read and write individual fields
+// from/to the Reader and Writer, so the caller should pass in an appropriately
+// buffered implementation to optimize performance.
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
+ compressBuf := new(bytes.Buffer)
+ compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
+ if err != nil {
+ return nil, err
+ }
+ framer := &Framer{
+ w: w,
+ headerBuf: compressBuf,
+ headerCompressor: compressor,
+ r: r,
+ }
+ return framer, nil
+}
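+
+// Example (illustrative sketch): as the note above suggests, wrap the
+// connection in buffered readers/writers before framing. netConn is a
+// placeholder net.Conn; the caller is responsible for flushing bw.
+//
+//	bw := bufio.NewWriter(netConn)
+//	br := bufio.NewReader(netConn)
+//	framer, err := NewFramer(bw, br)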
diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go
new file mode 100644
index 000000000..b212f66a2
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/write.go
@@ -0,0 +1,318 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func (frame *SynStreamFrame) write(f *Framer) error {
+ return f.writeSynStreamFrame(frame)
+}
+
+func (frame *SynReplyFrame) write(f *Framer) error {
+ return f.writeSynReplyFrame(frame)
+}
+
+func (frame *RstStreamFrame) write(f *Framer) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeRstStream
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if frame.Status == 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+ return
+ }
+ return
+}
+
+func (frame *SettingsFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSettings
+ frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
+ return
+ }
+ for _, flagIdValue := range frame.FlagIdValues {
+ flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
+ if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (frame *PingFrame) write(f *Framer) (err error) {
+ if frame.Id == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypePing
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 4
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
+ return
+ }
+ return
+}
+
+func (frame *GoAwayFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeGoAway
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+ return
+ }
+ return nil
+}
+
+func (frame *HeadersFrame) write(f *Framer) error {
+ return f.writeHeadersFrame(frame)
+}
+
+func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeWindowUpdate
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
+ return
+ }
+ return nil
+}
+
+func (frame *DataFrame) write(f *Framer) error {
+ return f.writeDataFrame(frame)
+}
+
+// WriteFrame writes a frame.
+func (f *Framer) WriteFrame(frame Frame) error {
+ return frame.write(f)
+}
+
+func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
+ if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
+ return err
+ }
+ if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
+ return err
+ }
+ flagsAndLength := uint32(h.Flags)<<24 | h.length
+ if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
+ return err
+ }
+ return nil
+}
+
+func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
+ n = 0
+ if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
+ return
+ }
+	n += 4 // uint32 length prefix is 4 bytes
+ for name, values := range h {
+ if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
+ return
+ }
+		n += 4 // uint32 length prefix is 4 bytes
+ name = strings.ToLower(name)
+ if _, err = io.WriteString(w, name); err != nil {
+ return
+ }
+ n += len(name)
+ v := strings.Join(values, headerValueSeparator)
+ if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
+ return
+ }
+		n += 4 // uint32 length prefix is 4 bytes
+ if _, err = io.WriteString(w, v); err != nil {
+ return
+ }
+ n += len(v)
+ }
+ return
+}
+
+func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynStream
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
+ return err
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return err
+ }
+ f.headerBuf.Reset()
+ return nil
+}
+
+func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynReply
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeHeaders
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
+ return &Error{InvalidDataFrame, frame.StreamId}
+ }
+
+ // Serialize frame to Writer.
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
+ if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
+ return
+ }
+ if _, err = f.w.Write(frame.Data); err != nil {
+ return
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
new file mode 100644
index 000000000..f9e9ee267
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/stream.go
@@ -0,0 +1,327 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrUnreadPartialData = errors.New("unread partial data")
+)
+
+type Stream struct {
+ streamId spdy.StreamId
+ parent *Stream
+ conn *Connection
+ startChan chan error
+
+ dataLock sync.RWMutex
+ dataChan chan []byte
+ unread []byte
+
+ priority uint8
+ headers http.Header
+ headerChan chan http.Header
+ finishLock sync.Mutex
+ finished bool
+ replyCond *sync.Cond
+ replied bool
+ closeLock sync.Mutex
+ closeChan chan bool
+}
+
+// WriteData writes data to the stream, sending one data frame per call.
+func (s *Stream) WriteData(data []byte, fin bool) error {
+ s.waitWriteReply()
+ var flags spdy.DataFlags
+
+ if fin {
+ flags = spdy.DataFlagFin
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return ErrWriteClosedStream
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+ }
+
+ dataFrame := &spdy.DataFrame{
+ StreamId: s.streamId,
+ Flags: flags,
+ Data: data,
+ }
+
+ debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
+ return s.conn.framer.WriteFrame(dataFrame)
+}
+
+// Write writes bytes to the stream, calling WriteData once per call.
+func (s *Stream) Write(data []byte) (n int, err error) {
+ err = s.WriteData(data, false)
+ if err == nil {
+ n = len(data)
+ }
+ return
+}
+
+// Read reads bytes from the stream. A single read will never return more
+// than what was sent in a single data frame, but multiple calls to
+// Read may return data from the same data frame.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ if s.unread == nil {
+ select {
+ case <-s.closeChan:
+ return 0, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return 0, io.EOF
+ }
+ s.unread = read
+ }
+ }
+ n = copy(p, s.unread)
+ if n < len(s.unread) {
+ s.unread = s.unread[n:]
+ } else {
+ s.unread = nil
+ }
+ return
+}
+
+// ReadData reads an entire data frame and returns the byte array
+// from the data frame. If there is unread data from the result
+// of a Read call, this function will return ErrUnreadPartialData.
+func (s *Stream) ReadData() ([]byte, error) {
+ debugMessage("(%p) Reading data from %d", s, s.streamId)
+ if s.unread != nil {
+ return nil, ErrUnreadPartialData
+ }
+ select {
+ case <-s.closeChan:
+ return nil, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return nil, io.EOF
+ }
+ return read, nil
+ }
+}
+
+func (s *Stream) waitWriteReply() {
+ if s.replyCond != nil {
+ s.replyCond.L.Lock()
+ for !s.replied {
+ s.replyCond.Wait()
+ }
+ s.replyCond.L.Unlock()
+ }
+}
+
+// Wait waits for the stream to receive a reply.
+func (s *Stream) Wait() error {
+ return s.WaitTimeout(time.Duration(0))
+}
+
+// WaitTimeout waits for the stream to receive a reply or for timeout.
+// When the timeout is reached, ErrTimeout will be returned.
+func (s *Stream) WaitTimeout(timeout time.Duration) error {
+ var timeoutChan <-chan time.Time
+ if timeout > time.Duration(0) {
+ timeoutChan = time.After(timeout)
+ }
+
+ select {
+ case err := <-s.startChan:
+ if err != nil {
+ return err
+ }
+ break
+ case <-timeoutChan:
+ return ErrTimeout
+ }
+ return nil
+}
+
+// Close closes the stream by sending an empty data frame with the
+// finish flag set, indicating this side is finished with the stream.
+func (s *Stream) Close() error {
+ select {
+ case <-s.closeChan:
+ // Stream is now fully closed
+ s.conn.removeStream(s)
+ default:
+ break
+ }
+ return s.WriteData([]byte{}, true)
+}
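+
+// Example (illustrative sketch): half-close after writing. Close only
+// finishes this side; reads remain valid until the remote side
+// finishes. req is a placeholder byte slice.
+//
+//	stream.WriteData(req, false)
+//	stream.Close() // empty data frame with FIN
+//	resp, err := stream.ReadData()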
+
+// Reset sends a reset frame, putting the stream into the fully closed state.
+func (s *Stream) Reset() error {
+ s.conn.removeStream(s)
+ return s.resetStream()
+}
+
+func (s *Stream) resetStream() error {
+ // Always call closeRemoteChannels, even if s.finished is already true.
+ // This makes it so that stream.Close() followed by stream.Reset() allows
+ // stream.Read() to unblock.
+ s.closeRemoteChannels()
+
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return nil
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: s.streamId,
+ Status: spdy.Cancel,
+ }
+ return s.conn.framer.WriteFrame(resetFrame)
+}
+
+// CreateSubStream creates a stream using the current stream as the parent.
+func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
+ return s.conn.CreateStream(headers, s, fin)
+}
+
+// SetPriority sets the stream priority; it does not affect the
+// remote priority of this stream after Open has been called.
+// Valid values are 0 through 7, with 0 being the highest priority
+// and 7 the lowest.
+func (s *Stream) SetPriority(priority uint8) {
+ s.priority = priority
+}
+
+// SendHeader sends a header frame across the stream
+func (s *Stream) SendHeader(headers http.Header, fin bool) error {
+ return s.conn.sendHeaders(headers, s, fin)
+}
+
+// SendReply sends a reply on a stream. It is only valid to call
+// once, when handling a new stream.
+func (s *Stream) SendReply(headers http.Header, fin bool) error {
+ if s.replyCond == nil {
+ return errors.New("cannot reply on initiated stream")
+ }
+ s.replyCond.L.Lock()
+ defer s.replyCond.L.Unlock()
+ if s.replied {
+ return nil
+ }
+
+ err := s.conn.sendReply(headers, s, fin)
+ if err != nil {
+ return err
+ }
+
+ s.replied = true
+ s.replyCond.Broadcast()
+ return nil
+}
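+
+// Example (illustrative sketch): a minimal handler for a newly
+// received stream that accepts it and echoes a single data frame.
+// Error handling is elided.
+//
+//	func handle(stream *Stream) {
+//		if err := stream.SendReply(http.Header{}, false); err != nil {
+//			return
+//		}
+//		if data, err := stream.ReadData(); err == nil {
+//			stream.WriteData(data, true)
+//		}
+//	}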
+
+// Refuse sends a reset frame with the status refused. It is only
+// valid to call once, when handling a new stream. This
+// may be used to indicate that a stream is not allowed
+// when HTTP status codes are not being used.
+func (s *Stream) Refuse() error {
+ if s.replied {
+ return nil
+ }
+ s.replied = true
+ return s.conn.sendReset(spdy.RefusedStream, s)
+}
+
+// Cancel sends a reset frame with the status canceled. This
+// can be used at any time by the creator of the Stream to
+// indicate the stream is no longer needed.
+func (s *Stream) Cancel() error {
+ return s.conn.sendReset(spdy.Cancel, s)
+}
+
+// ReceiveHeader receives a header sent on the other side
+// of the stream. This function will block until a header
+// is received or the stream is closed.
+func (s *Stream) ReceiveHeader() (http.Header, error) {
+ select {
+ case <-s.closeChan:
+ break
+ case header, ok := <-s.headerChan:
+ if !ok {
+ return nil, fmt.Errorf("header chan closed")
+ }
+ return header, nil
+ }
+ return nil, fmt.Errorf("stream closed")
+}
+
+// Parent returns the parent stream
+func (s *Stream) Parent() *Stream {
+ return s.parent
+}
+
+// Headers returns the headers used to create the stream
+func (s *Stream) Headers() http.Header {
+ return s.headers
+}
+
+// String returns the string version of the stream, using the
+// streamId to uniquely identify it.
+func (s *Stream) String() string {
+ return fmt.Sprintf("stream:%d", s.streamId)
+}
+
+// Identifier returns a 32-bit identifier for the stream.
+func (s *Stream) Identifier() uint32 {
+ return uint32(s.streamId)
+}
+
+// IsFinished returns whether the stream has finished
+// sending data
+func (s *Stream) IsFinished() bool {
+ return s.finished
+}
+
+// Implement net.Conn interface
+
+func (s *Stream) LocalAddr() net.Addr {
+ return s.conn.conn.LocalAddr()
+}
+
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.conn.conn.RemoteAddr()
+}
+
+// TODO set per stream values instead of connection-wide
+
+func (s *Stream) SetDeadline(t time.Time) error {
+ return s.conn.conn.SetDeadline(t)
+}
+
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ return s.conn.conn.SetReadDeadline(t)
+}
+
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ return s.conn.conn.SetWriteDeadline(t)
+}
+
+func (s *Stream) closeRemoteChannels() {
+ s.closeLock.Lock()
+ defer s.closeLock.Unlock()
+ select {
+ case <-s.closeChan:
+ default:
+ close(s.closeChan)
+ }
+}
diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go
new file mode 100644
index 000000000..1b2c199a4
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/utils.go
@@ -0,0 +1,16 @@
+package spdystream
+
+import (
+ "log"
+ "os"
+)
+
+var (
+ DEBUG = os.Getenv("DEBUG")
+)
+
+func debugMessage(fmt string, args ...interface{}) {
+ if DEBUG != "" {
+ log.Printf(fmt, args...)
+ }
+}
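+
+// Example (illustrative): debug logging is enabled by setting the
+// DEBUG environment variable to any non-empty value before starting
+// the process, e.g.
+//
+//	DEBUG=true ./yourprogram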
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE b/vendor/github.com/emicklei/go-restful-swagger12/LICENSE
new file mode 100644
index 000000000..aeab5b440
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2017 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/README.md b/vendor/github.com/emicklei/go-restful-swagger12/README.md
new file mode 100644
index 000000000..037b9b09c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/README.md
@@ -0,0 +1,83 @@
+# go-restful-swagger12
+
+[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
+[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)
+
+How to use Swagger UI with go-restful
+=
+
+Get the Swagger UI sources (version 1.2 only)
+
+ git clone https://github.com/wordnik/swagger-ui.git
+
+The project contains a "dist" folder.
+Its contents include all the Swagger UI files you need.
+
+The `index.html` has a `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
+You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`
+
+Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
+
+ config := swagger.Config{
+ WebServices: restful.RegisteredWebServices(),
+ ApiPath: "/apidocs.json",
+ SwaggerPath: "/apidocs/",
+ SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
+ swagger.InstallSwaggerService(config)
+
+
+Documenting Structs
+--
+
+Currently there are two ways to document your structs for the go-restful Swagger integration.
+
+###### By using struct tags
+- Use tag "description" to annotate a struct field with a description to show in the UI
+- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line.
+
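+For example, a minimal sketch using both tags (the struct and its values are illustrative):
+
+ type Plan struct {
+ // "description" documents the field in the UI
+ Name string `json:"name" description:"name of the plan"`
+ // "modelDescription" documents the struct itself
+ Price int `json:"price" modelDescription:"A subscription plan"`
+ }
+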
+###### By using the SwaggerDoc method
+Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
+
+ type Address struct {
+ Country string `json:"country,omitempty"`
+ PostCode int `json:"postcode,omitempty"`
+ }
+
+ func (Address) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "Address doc",
+ "country": "Country doc",
+ "postcode": "PostCode doc",
+ }
+ }
+
+This example will generate JSON like this:
+
+ {
+ "Address": {
+ "id": "Address",
+ "description": "Address doc",
+ "properties": {
+ "country": {
+ "type": "string",
+ "description": "Country doc"
+ },
+ "postcode": {
+ "type": "integer",
+ "format": "int32",
+ "description": "PostCode doc"
+ }
+ }
+ }
+ }
+
+**Very Important Notes:**
+- `SwaggerDoc()` uses a **non-pointer** receiver (e.g. func (Address) and not func (*Address))
+- The returned map should use the JSON field name as its key (e.g. `"postcode"` and not `"PostCode"`)
+
+Notes
+--
+- The Nickname of an Operation is automatically set from the name of the function. You can override it using RouteBuilder.Operation(..)
+- The WebServices field of swagger.Config can be used to control which services you want to expose and document; you can have multiple configs and therefore multiple endpoints.
+
+© 2017, ernestmicklei.com. MIT License. Contributions welcome. \ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go b/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go
new file mode 100644
index 000000000..9f4c3690a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go
@@ -0,0 +1,64 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// ApiDeclarationList maintains an ordered list of ApiDeclaration.
+type ApiDeclarationList struct {
+ List []ApiDeclaration
+}
+
+// At returns the ApiDeclaration with the given path; ok is false if it is absent
+func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
+ for _, each := range l.List {
+ if each.ResourcePath == path {
+ return each, true
+ }
+ }
+ return a, false
+}
+
+// Put adds or replaces an ApiDeclaration with this path
+func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
+ // maybe replace existing
+ for i, each := range l.List {
+ if each.ResourcePath == path {
+ // replace
+ l.List[i] = a
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, a)
+}
+
+// Do enumerates all the declarations, each with its assigned path
+func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
+ for _, each := range l.List {
+ block(each.ResourcePath, each)
+ }
+}
+
+// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration
+func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.ResourcePath)
+ buf.WriteString("\": ")
+ encoder.Encode(each)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
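+
+// Usage sketch:
+//
+// var l ApiDeclarationList
+// l.Put("/users", ApiDeclaration{ResourcePath: "/users"})
+// if decl, ok := l.At("/users"); ok { ... }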
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/config.go b/vendor/github.com/emicklei/go-restful-swagger12/config.go
new file mode 100644
index 000000000..18f8e57d9
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/config.go
@@ -0,0 +1,46 @@
+package swagger
+
+import (
+ "net/http"
+ "reflect"
+
+ "github.com/emicklei/go-restful"
+)
+
+// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
+type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
+
+// MapSchemaFormatFunc can be used to modify typeName at definition time.
+type MapSchemaFormatFunc func(typeName string) string
+
+// MapModelTypeNameFunc can be used to return the desired typeName for a given
+// type. It will return false if the default name should be used.
+type MapModelTypeNameFunc func(t reflect.Type) (string, bool)
+
+type Config struct {
+ // URL where the services are available, e.g. http://localhost:8080
+ // if left empty then the basePath of Swagger is taken from the actual request
+ WebServicesUrl string
+ // path where the JSON API is available, e.g. /apidocs
+ ApiPath string
+ // [optional] path where the swagger UI will be served, e.g. /swagger
+ SwaggerPath string
+ // [optional] location of folder containing Swagger HTML5 application index.html
+ SwaggerFilePath string
+ // api listing is constructed from this list of restful WebServices.
+ WebServices []*restful.WebService
+ // will serve all static content (scripts,pages,images)
+ StaticHandler http.Handler
+ // [optional] by default CORS (Cross-Origin Resource Sharing) is enabled.
+ DisableCORS bool
+ // Top-level API version. Is reflected in the resource listing.
+ ApiVersion string
+ // If set then call this handler after building the complete ApiDeclaration Map
+ PostBuildHandler PostBuildDeclarationMapFunc
+ // Swagger global info struct
+ Info Info
+ // [optional] If set, model builder should call this handler to get additional typename-to-swagger-format-field conversion.
+ SchemaFormatHandler MapSchemaFormatFunc
+ // [optional] If set, model builder should call this handler to retrieve the name for a given type.
+ ModelTypeNameHandler MapModelTypeNameFunc
+}
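+
+// Example handler (illustrative): strip package paths from type names.
+//
+// config.ModelTypeNameHandler = func(t reflect.Type) (string, bool) {
+// return t.Name(), t.Name() != ""
+// }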
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go
new file mode 100644
index 000000000..d40786f25
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go
@@ -0,0 +1,467 @@
+package swagger
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+)
+
+// ModelBuildable is used for extending Structs that need more control over
+// how the Model appears in the Swagger api declaration.
+type ModelBuildable interface {
+ PostBuildModel(m *Model) *Model
+}
+
+type modelBuilder struct {
+ Models *ModelList
+ Config *Config
+}
+
+type documentable interface {
+ SwaggerDoc() map[string]string
+}
+
+// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
+// If it exists, retrieve the documentation and overwrite all struct tag descriptions
+func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
+ if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
+ return docable.SwaggerDoc()
+ }
+ return make(map[string]string)
+}
+
+// addModelFrom creates and adds a Model to the builder and, if the sample
+// implements ModelBuildable, calls the post-build hook for customizations
+func (b modelBuilder) addModelFrom(sample interface{}) {
+ if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
+ // allow customizations
+ if buildable, ok := sample.(ModelBuildable); ok {
+ modelOrNil = buildable.PostBuildModel(modelOrNil)
+ b.Models.Put(modelOrNil.Id, *modelOrNil)
+ }
+ }
+}
+
+func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
+ // Turn pointers into simpler types so further checks are
+ // correct.
+ if st.Kind() == reflect.Ptr {
+ st = st.Elem()
+ }
+
+ modelName := b.keyFrom(st)
+ if nameOverride != "" {
+ modelName = nameOverride
+ }
+ // no models needed for primitive types
+ if b.isPrimitiveType(modelName) {
+ return nil
+ }
+ // The Go encoding/json package says array and slice values encode as
+ // JSON arrays, except that []byte encodes as a base64-encoded string.
+ // If we see a []byte here, treat it as a primitive type (string)
+ // and deal with it in buildArrayTypeProperty.
+ if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) &&
+ st.Elem().Kind() == reflect.Uint8 {
+ return nil
+ }
+ // see if we already have visited this model
+ if _, ok := b.Models.At(modelName); ok {
+ return nil
+ }
+ sm := Model{
+ Id: modelName,
+ Required: []string{},
+ Properties: ModelPropertyList{}}
+
+ // reference the model before further initializing (enables recursive structs)
+ b.Models.Put(modelName, sm)
+
+ // check for slice or array
+ if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
+ b.addModel(st.Elem(), "")
+ return &sm
+ }
+ // check for structure or primitive type
+ if st.Kind() != reflect.Struct {
+ return &sm
+ }
+
+ fullDoc := getDocFromMethodSwaggerDoc2(st)
+ modelDescriptions := []string{}
+
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
+ if len(modelDescription) > 0 {
+ modelDescriptions = append(modelDescriptions, modelDescription)
+ }
+
+ // add if not omitted
+ if len(jsonName) != 0 {
+ // update description
+ if fieldDoc, ok := fullDoc[jsonName]; ok {
+ prop.Description = fieldDoc
+ }
+ // update Required
+ if b.isPropertyRequired(field) {
+ sm.Required = append(sm.Required, jsonName)
+ }
+ sm.Properties.Put(jsonName, prop)
+ }
+ }
+
+ // We always overwrite documentation if SwaggerDoc method exists
+ // "" is special for documenting the struct itself
+ if modelDoc, ok := fullDoc[""]; ok {
+ sm.Description = modelDoc
+ } else if len(modelDescriptions) != 0 {
+ sm.Description = strings.Join(modelDescriptions, "\n")
+ }
+
+ // update model builder with completed model
+ b.Models.Put(modelName, sm)
+
+ return &sm
+}
+
+func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
+ required := true
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if len(s) > 1 && s[1] == "omitempty" {
+ return false
+ }
+ }
+ return required
+}
+
+func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
+ jsonName = b.jsonNameOfField(field)
+ if len(jsonName) == 0 {
+ // empty name signals skip property
+ return "", "", prop
+ }
+
+ if field.Name == "XMLName" && field.Type.String() == "xml.Name" {
+ // property is metadata for the xml.Name attribute, can be skipped
+ return "", "", prop
+ }
+
+ if tag := field.Tag.Get("modelDescription"); tag != "" {
+ modelDescription = tag
+ }
+
+ prop.setPropertyMetadata(field)
+ if prop.Type != nil {
+ return jsonName, modelDescription, prop
+ }
+ fieldType := field.Type
+
+ // check if type is doing its own marshalling
+ marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+ if fieldType.Implements(marshalerType) {
+ var pType = "string"
+ if prop.Type == nil {
+ prop.Type = &pType
+ }
+ if prop.Format == "" {
+ prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType))
+ }
+ return jsonName, modelDescription, prop
+ }
+
+ // check if annotation says it is a string
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if len(s) > 1 && s[1] == "string" {
+ stringt := "string"
+ prop.Type = &stringt
+ return jsonName, modelDescription, prop
+ }
+ }
+
+ fieldKind := fieldType.Kind()
+ switch {
+ case fieldKind == reflect.Struct:
+ jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Slice || fieldKind == reflect.Array:
+ jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Ptr:
+ jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.String:
+ stringt := "string"
+ prop.Type = &stringt
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Map:
+ // if it's a map, it's unstructured, and swagger 1.2 can't handle it
+ objectType := "object"
+ prop.Type = &objectType
+ return jsonName, modelDescription, prop
+ }
+
+ fieldTypeName := b.keyFrom(fieldType)
+ if b.isPrimitiveType(fieldTypeName) {
+ mapped := b.jsonSchemaType(fieldTypeName)
+ prop.Type = &mapped
+ prop.Format = b.jsonSchemaFormat(fieldTypeName)
+ return jsonName, modelDescription, prop
+ }
+ modelType := b.keyFrom(fieldType)
+ prop.Ref = &modelType
+
+ if fieldType.Name() == "" { // override type of anonymous structs
+ nestedTypeName := modelName + "." + jsonName
+ prop.Ref = &nestedTypeName
+ b.addModel(fieldType, nestedTypeName)
+ }
+ return jsonName, modelDescription, prop
+}
+
+func hasNamedJSONTag(field reflect.StructField) bool {
+ parts := strings.Split(field.Tag.Get("json"), ",")
+ if len(parts) == 0 {
+ return false
+ }
+ for _, s := range parts[1:] {
+ if s == "inline" {
+ return false
+ }
+ }
+ return len(parts[0]) > 0
+}
+
+func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
+ prop.setPropertyMetadata(field)
+ // Check for type override in tag
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+ // check for anonymous
+ if len(fieldType.Name()) == 0 {
+ // anonymous
+ anonType := model.Id + "." + jsonName
+ b.addModel(fieldType, anonType)
+ prop.Ref = &anonType
+ return jsonName, prop
+ }
+
+ if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
+ // embedded struct
+ sub := modelBuilder{new(ModelList), b.Config}
+ sub.addModel(fieldType, "")
+ subKey := sub.keyFrom(fieldType)
+ // merge properties from sub
+ subModel, _ := sub.Models.At(subKey)
+ subModel.Properties.Do(func(k string, v ModelProperty) {
+ model.Properties.Put(k, v)
+ // if subModel says this property is required then include it
+ required := false
+ for _, each := range subModel.Required {
+ if k == each {
+ required = true
+ break
+ }
+ }
+ if required {
+ model.Required = append(model.Required, k)
+ }
+ })
+ // add all new referenced models
+ sub.Models.Do(func(key string, sub Model) {
+ if key != subKey {
+ if _, ok := b.Models.At(key); !ok {
+ b.Models.Put(key, sub)
+ }
+ }
+ })
+ // empty name signals skip property
+ return "", prop
+ }
+ // simple struct
+ b.addModel(fieldType, "")
+ var pType = b.keyFrom(fieldType)
+ prop.Ref = &pType
+ return jsonName, prop
+}
+
+func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
+ // check for type override in tags
+ prop.setPropertyMetadata(field)
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+ if fieldType.Elem().Kind() == reflect.Uint8 {
+ stringt := "string"
+ prop.Type = &stringt
+ return jsonName, prop
+ }
+ var pType = "array"
+ prop.Type = &pType
+ isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
+ elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
+ prop.Items = new(Item)
+ if isPrimitive {
+ mapped := b.jsonSchemaType(elemTypeName)
+ prop.Items.Type = &mapped
+ } else {
+ prop.Items.Ref = &elemTypeName
+ }
+ // add|overwrite model for element type
+ if fieldType.Elem().Kind() == reflect.Ptr {
+ fieldType = fieldType.Elem()
+ }
+ if !isPrimitive {
+ b.addModel(fieldType.Elem(), elemTypeName)
+ }
+ return jsonName, prop
+}
+
+func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
+ prop.setPropertyMetadata(field)
+ // Check for type override in tags
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+
+ // override type of pointer to list-likes
+ if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
+ var pType = "array"
+ prop.Type = &pType
+ isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
+ elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
+ if isPrimitive {
+ primName := b.jsonSchemaType(elemName)
+ prop.Items = &Item{Ref: &primName}
+ } else {
+ prop.Items = &Item{Ref: &elemName}
+ }
+ if !isPrimitive {
+ // add|overwrite model for element type
+ b.addModel(fieldType.Elem().Elem(), elemName)
+ }
+ } else {
+ // non-array, pointer type
+ fieldTypeName := b.keyFrom(fieldType.Elem())
+ var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path
+ if b.isPrimitiveType(fieldTypeName) {
+ prop.Type = &pType
+ prop.Format = b.jsonSchemaFormat(fieldTypeName)
+ return jsonName, prop
+ }
+ prop.Ref = &pType
+ elemName := ""
+ if fieldType.Elem().Name() == "" {
+ elemName = modelName + "." + jsonName
+ prop.Ref = &elemName
+ }
+ b.addModel(fieldType.Elem(), elemName)
+ }
+ return jsonName, prop
+}
+
+func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Name() == "" {
+ return modelName + "." + jsonName
+ }
+ return b.keyFrom(t)
+}
+
+func (b modelBuilder) keyFrom(st reflect.Type) string {
+ key := st.String()
+ if b.Config != nil && b.Config.ModelTypeNameHandler != nil {
+ if name, ok := b.Config.ModelTypeNameHandler(st); ok {
+ key = name
+ }
+ }
+ if len(st.Name()) == 0 { // unnamed type
+ // Swagger UI has special meaning for [
+ key = strings.Replace(key, "[]", "||", -1)
+ }
+ return key
+}
+
+// see also https://golang.org/ref/spec#Numeric_types
+func (b modelBuilder) isPrimitiveType(modelName string) bool {
+ if len(modelName) == 0 {
+ return false
+ }
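+ // Note: this is a substring match, so a short name that happens to be
+ // contained in the list (e.g. "time") would also be treated as primitive.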
+ return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
+}
+
+// jsonNameOfField returns the name of the field as it should appear in JSON format
+// An empty string indicates that this field is not part of the JSON representation
+func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if s[0] == "-" {
+ // empty name signals skip property
+ return ""
+ } else if s[0] != "" {
+ return s[0]
+ }
+ }
+ return field.Name
+}
+
+// see also http://json-schema.org/latest/json-schema-core.html#anchor8
+func (b modelBuilder) jsonSchemaType(modelName string) string {
+ schemaMap := map[string]string{
+ "uint": "integer",
+ "uint8": "integer",
+ "uint16": "integer",
+ "uint32": "integer",
+ "uint64": "integer",
+
+ "int": "integer",
+ "int8": "integer",
+ "int16": "integer",
+ "int32": "integer",
+ "int64": "integer",
+
+ "byte": "integer",
+ "float64": "number",
+ "float32": "number",
+ "bool": "boolean",
+ "time.Time": "string",
+ }
+ mapped, ok := schemaMap[modelName]
+ if !ok {
+ return modelName // use as is (custom or struct)
+ }
+ return mapped
+}
+
+func (b modelBuilder) jsonSchemaFormat(modelName string) string {
+ if b.Config != nil && b.Config.SchemaFormatHandler != nil {
+ if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
+ return mapped
+ }
+ }
+ schemaMap := map[string]string{
+ "int": "int32",
+ "int32": "int32",
+ "int64": "int64",
+ "byte": "byte",
+ "uint": "integer",
+ "uint8": "byte",
+ "float64": "double",
+ "float32": "float",
+ "time.Time": "date-time",
+ "*time.Time": "date-time",
+ }
+ mapped, ok := schemaMap[modelName]
+ if !ok {
+ return "" // no format
+ }
+ return mapped
+}
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_list.go
new file mode 100644
index 000000000..9bb6cb678
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/model_list.go
@@ -0,0 +1,86 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// NamedModel associates a name with a Model (not using its Id)
+type NamedModel struct {
+ Name string
+ Model Model
+}
+
+// ModelList encapsulates a list of NamedModel (association)
+type ModelList struct {
+ List []NamedModel
+}
+
+// Put adds or replaces a Model by its name
+func (l *ModelList) Put(name string, model Model) {
+ for i, each := range l.List {
+ if each.Name == name {
+ // replace
+ l.List[i] = NamedModel{name, model}
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, NamedModel{name, model})
+}
+
+// At returns a Model by its name, ok is false if absent
+func (l *ModelList) At(name string) (m Model, ok bool) {
+ for _, each := range l.List {
+ if each.Name == name {
+ return each.Model, true
+ }
+ }
+ return m, false
+}
+
+// Do enumerates all the models, each with its assigned name
+func (l *ModelList) Do(block func(name string, value Model)) {
+ for _, each := range l.List {
+ block(each.Name, each.Model)
+ }
+}
+
+// MarshalJSON writes the ModelList as if it was a map[string]Model
+func (l ModelList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.Name)
+ buf.WriteString("\": ")
+ encoder.Encode(each.Model)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
+
+// UnmarshalJSON reads back a ModelList. This is an expensive operation.
+func (l *ModelList) UnmarshalJSON(data []byte) error {
+ raw := map[string]interface{}{}
+ if err := json.NewDecoder(bytes.NewReader(data)).Decode(&raw); err != nil {
+ return err
+ }
+ for k, v := range raw {
+ // produces JSON bytes for each value
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ var m Model
+ if err := json.NewDecoder(bytes.NewReader(data)).Decode(&m); err != nil {
+ return err
+ }
+ l.Put(k, m)
+ }
+ return nil
+}
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go
new file mode 100644
index 000000000..a433b6b70
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go
@@ -0,0 +1,81 @@
+package swagger
+
+import (
+ "reflect"
+ "strings"
+)
+
+func (prop *ModelProperty) setDescription(field reflect.StructField) {
+ if tag := field.Tag.Get("description"); tag != "" {
+ prop.Description = tag
+ }
+}
+
+func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
+ if tag := field.Tag.Get("default"); tag != "" {
+ prop.DefaultValue = Special(tag)
+ }
+}
+
+func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
+ // We use | to separate the enum values. This value is chosen
+ // since it's unlikely to be useful in actual enumeration values.
+ if tag := field.Tag.Get("enum"); tag != "" {
+ prop.Enum = strings.Split(tag, "|")
+ }
+}
+
+func (prop *ModelProperty) setMaximum(field reflect.StructField) {
+ if tag := field.Tag.Get("maximum"); tag != "" {
+ prop.Maximum = tag
+ }
+}
+
+func (prop *ModelProperty) setType(field reflect.StructField) {
+ if tag := field.Tag.Get("type"); tag != "" {
+ // Check if the first two characters of the type tag are
+ // intended to emulate slice/array behaviour.
+ //
+ // If the type is intended to be a slice/array then add the
+ // overridden type to the array item instead of the main property
+ if len(tag) > 2 && tag[0:2] == "[]" {
+ pType := "array"
+ prop.Type = &pType
+ prop.Items = new(Item)
+
+ iType := tag[2:]
+ prop.Items.Type = &iType
+ return
+ }
+
+ prop.Type = &tag
+ }
+}
+
+func (prop *ModelProperty) setMinimum(field reflect.StructField) {
+ if tag := field.Tag.Get("minimum"); tag != "" {
+ prop.Minimum = tag
+ }
+}
+
+func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
+ tag := field.Tag.Get("unique")
+ switch tag {
+ case "true":
+ v := true
+ prop.UniqueItems = &v
+ case "false":
+ v := false
+ prop.UniqueItems = &v
+ }
+}
+
+func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
+ prop.setDescription(field)
+ prop.setEnumValues(field)
+ prop.setMinimum(field)
+ prop.setMaximum(field)
+ prop.setUniqueItems(field)
+ prop.setDefaultValue(field)
+ prop.setType(field)
+}
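+
+// Tag usage sketch (the field shown is illustrative):
+//
+// Rating int `json:"rating" minimum:"1" maximum:"5" default:"3" description:"star rating"`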
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go
new file mode 100644
index 000000000..3babb1944
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go
@@ -0,0 +1,87 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// NamedModelProperty associates a name to a ModelProperty
+type NamedModelProperty struct {
+ Name string
+ Property ModelProperty
+}
+
+// ModelPropertyList encapsulates a list of NamedModelProperty (association)
+type ModelPropertyList struct {
+ List []NamedModelProperty
+}
+
+// At returns the ModelProperty with the given name; ok is false if it is absent
+func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
+ for _, each := range l.List {
+ if each.Name == name {
+ return each.Property, true
+ }
+ }
+ return p, false
+}
+
+// Put adds or replaces a ModelProperty with this name
+func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
+ // maybe replace existing
+ for i, each := range l.List {
+ if each.Name == name {
+ // replace
+ l.List[i] = NamedModelProperty{Name: name, Property: prop}
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
+}
+
+// Do enumerates all the properties, each with its assigned name
+func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
+ for _, each := range l.List {
+ block(each.Name, each.Property)
+ }
+}
+
+// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
+func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.Name)
+ buf.WriteString("\": ")
+ encoder.Encode(each.Property)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
+
+// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
+func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
+ raw := map[string]interface{}{}
+ if err := json.NewDecoder(bytes.NewReader(data)).Decode(&raw); err != nil {
+ return err
+ }
+ for k, v := range raw {
+ // produces JSON bytes for each value
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ var m ModelProperty
+ if err := json.NewDecoder(bytes.NewReader(data)).Decode(&m); err != nil {
+ return err
+ }
+ l.Put(k, m)
+ }
+ return nil
+}
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go b/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go
new file mode 100644
index 000000000..b33ccfbeb
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go
@@ -0,0 +1,36 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "github.com/emicklei/go-restful"
+
+type orderedRouteMap struct {
+ elements map[string][]restful.Route
+ keys []string
+}
+
+func newOrderedRouteMap() *orderedRouteMap {
+ return &orderedRouteMap{
+ elements: map[string][]restful.Route{},
+ keys: []string{},
+ }
+}
+
+func (o *orderedRouteMap) Add(key string, route restful.Route) {
+ routes, ok := o.elements[key]
+ if ok {
+ routes = append(routes, route)
+ o.elements[key] = routes
+ return
+ }
+ o.elements[key] = []restful.Route{route}
+ o.keys = append(o.keys, key)
+}
+
+func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
+ for _, k := range o.keys {
+ block(k, o.elements[k])
+ }
+}
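+
+// Usage sketch: unlike a plain map, iteration order matches insertion order.
+//
+// m := newOrderedRouteMap()
+// m.Add("/users", route)
+// m.Do(func(key string, routes []restful.Route) { ... })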
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger.go
new file mode 100644
index 000000000..9c40833e7
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/swagger.go
@@ -0,0 +1,185 @@
+// Package swagger implements the structures of the Swagger 1.2 specification;
+// see https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
+package swagger
+
+const swaggerVersion = "1.2"
+
+// 4.3.3 Data Type Fields
+type DataTypeFields struct {
+ Type *string `json:"type,omitempty"` // if Ref not used
+ Ref *string `json:"$ref,omitempty"` // if Type not used
+ Format string `json:"format,omitempty"`
+ DefaultValue Special `json:"defaultValue,omitempty"`
+ Enum []string `json:"enum,omitempty"`
+ Minimum string `json:"minimum,omitempty"`
+ Maximum string `json:"maximum,omitempty"`
+ Items *Item `json:"items,omitempty"`
+ UniqueItems *bool `json:"uniqueItems,omitempty"`
+}
+
+type Special string
+
+// 4.3.4 Items Object
+type Item struct {
+ Type *string `json:"type,omitempty"`
+ Ref *string `json:"$ref,omitempty"`
+ Format string `json:"format,omitempty"`
+}
+
+// 5.1 Resource Listing
+type ResourceListing struct {
+ SwaggerVersion string `json:"swaggerVersion"` // e.g. 1.2
+ Apis []Resource `json:"apis"`
+ ApiVersion string `json:"apiVersion"`
+ Info Info `json:"info"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+}
+
+// 5.1.2 Resource Object
+type Resource struct {
+ Path string `json:"path"` // relative or absolute, must start with /
+ Description string `json:"description"`
+}
+
+// 5.1.3 Info Object
+type Info struct {
+ Title string `json:"title"`
+ Description string `json:"description"`
+ TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
+ Contact string `json:"contact,omitempty"`
+ License string `json:"license,omitempty"`
+ LicenseUrl string `json:"licenseUrl,omitempty"`
+}
+
+// 5.1.5
+type Authorization struct {
+ Type string `json:"type"`
+ PassAs string `json:"passAs"`
+ Keyname string `json:"keyname"`
+ Scopes []Scope `json:"scopes"`
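+ // Note: the Swagger 1.2 spec names this field "grantTypes"; the tag
+ // below is kept as-is to preserve the existing wire format.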
+ GrantTypes []GrantType `json:"grandTypes"`
+}
+
+// 5.1.6, 5.2.11
+type Scope struct {
+ // Required. The name of the scope.
+ Scope string `json:"scope"`
+ // Recommended. A short description of the scope.
+ Description string `json:"description"`
+}
+
+// 5.1.7
+type GrantType struct {
+ Implicit Implicit `json:"implicit"`
+ AuthorizationCode AuthorizationCode `json:"authorization_code"`
+}
+
+// 5.1.8 Implicit Object
+type Implicit struct {
+ // Required. The login endpoint definition.
+ LoginEndpoint LoginEndpoint `json:"loginEndpoint"`
+ // An optional alternative name to standard "access_token" OAuth2 parameter.
+ TokenName string `json:"tokenName"`
+}
+
+// 5.1.9 Authorization Code Object
+type AuthorizationCode struct {
+ TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
+ TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
+}
+
+// 5.1.10 Login Endpoint Object
+type LoginEndpoint struct {
+ // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+}
+
+// 5.1.11 Token Request Endpoint Object
+type TokenRequestEndpoint struct {
+ // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+ // An optional alternative name to standard "client_id" OAuth2 parameter.
+ ClientIdName string `json:"clientIdName"`
+ // An optional alternative name to the standard "client_secret" OAuth2 parameter.
+ ClientSecretName string `json:"clientSecretName"`
+}
+
+// 5.1.12 Token Endpoint Object
+type TokenEndpoint struct {
+ // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+ // An optional alternative name to standard "access_token" OAuth2 parameter.
+ TokenName string `json:"tokenName"`
+}
+
+// 5.2 API Declaration
+type ApiDeclaration struct {
+ SwaggerVersion string `json:"swaggerVersion"`
+ ApiVersion string `json:"apiVersion"`
+ BasePath string `json:"basePath"`
+ ResourcePath string `json:"resourcePath"` // must start with /
+ Info Info `json:"info"`
+ Apis []Api `json:"apis,omitempty"`
+ Models ModelList `json:"models,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+}
+
+// 5.2.2 API Object
+type Api struct {
+ Path string `json:"path"` // relative or absolute, must start with /
+ Description string `json:"description"`
+ Operations []Operation `json:"operations,omitempty"`
+}
+
+// 5.2.3 Operation Object
+type Operation struct {
+ DataTypeFields
+ Method string `json:"method"`
+ Summary string `json:"summary,omitempty"`
+ Notes string `json:"notes,omitempty"`
+ Nickname string `json:"nickname"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+ Parameters []Parameter `json:"parameters"`
+ ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
+ Produces []string `json:"produces,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Deprecated string `json:"deprecated,omitempty"`
+}
+
+// 5.2.4 Parameter Object
+type Parameter struct {
+ DataTypeFields
+ ParamType string `json:"paramType"` // path,query,body,header,form
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Required bool `json:"required"`
+ AllowMultiple bool `json:"allowMultiple"`
+}
+
+// 5.2.5 Response Message Object
+type ResponseMessage struct {
+ Code int `json:"code"`
+ Message string `json:"message"`
+ ResponseModel string `json:"responseModel,omitempty"`
+}
+
+// 5.2.6, 5.2.7 Models Object
+type Model struct {
+ Id string `json:"id"`
+ Description string `json:"description,omitempty"`
+ Required []string `json:"required,omitempty"`
+ Properties ModelPropertyList `json:"properties"`
+ SubTypes []string `json:"subTypes,omitempty"`
+ Discriminator string `json:"discriminator,omitempty"`
+}
+
+// 5.2.8 Properties Object
+type ModelProperty struct {
+ DataTypeFields
+ Description string `json:"description,omitempty"`
+}
+
+// 5.2.10
+type Authorizations map[string]Authorization
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go
new file mode 100644
index 000000000..05a3c7e76
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go
@@ -0,0 +1,21 @@
+package swagger
+
+type SwaggerBuilder struct {
+ SwaggerService
+}
+
+func NewSwaggerBuilder(config Config) *SwaggerBuilder {
+ return &SwaggerBuilder{*newSwaggerService(config)}
+}
+
+func (sb SwaggerBuilder) ProduceListing() ResourceListing {
+ return sb.SwaggerService.produceListing()
+}
+
+func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
+ return sb.SwaggerService.produceAllDeclarations()
+}
+
+func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
+ return sb.SwaggerService.produceDeclarations(route)
+}
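+
+// Usage sketch (Config fields as documented in config.go):
+//
+// sb := NewSwaggerBuilder(Config{WebServices: restful.RegisteredWebServices()})
+// listing := sb.ProduceListing()
+// decls := sb.ProduceAllDeclarations()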
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go
new file mode 100644
index 000000000..d90623120
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go
@@ -0,0 +1,443 @@
+package swagger
+
+import (
+ "fmt"
+
+ "github.com/emicklei/go-restful"
+ // "github.com/emicklei/hopwatch"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+type SwaggerService struct {
+ config Config
+ apiDeclarationMap *ApiDeclarationList
+}
+
+func newSwaggerService(config Config) *SwaggerService {
+ sws := &SwaggerService{
+ config: config,
+ apiDeclarationMap: new(ApiDeclarationList)}
+
+ // Build all ApiDeclarations
+ for _, each := range config.WebServices {
+ rootPath := each.RootPath()
+ // skip the api service itself
+ if rootPath != config.ApiPath {
+ if rootPath == "" || rootPath == "/" {
+ // use routes
+ for _, route := range each.Routes() {
+ entry := staticPathFromRoute(route)
+ _, exists := sws.apiDeclarationMap.At(entry)
+ if !exists {
+ sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
+ }
+ }
+ } else { // use root path
+ sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
+ }
+ }
+ }
+
+ // if specified then call the PostBuildHandler
+ if config.PostBuildHandler != nil {
+ config.PostBuildHandler(sws.apiDeclarationMap)
+ }
+ return sws
+}
+
+// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
+var LogInfo = func(format string, v ...interface{}) {
+ // use the restful package-wide logger
+ log.Printf(format, v...)
+}
+
+// InstallSwaggerService adds the WebService that provides the API documentation of all services
+// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
+func InstallSwaggerService(aSwaggerConfig Config) {
+ RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
+}
+
+// RegisterSwaggerService adds the WebService that provides the API documentation of all services
+// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
+func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
+ sws := newSwaggerService(config)
+ ws := new(restful.WebService)
+ ws.Path(config.ApiPath)
+ ws.Produces(restful.MIME_JSON)
+ if config.DisableCORS {
+ ws.Filter(enableCORS)
+ }
+ ws.Route(ws.GET("/").To(sws.getListing))
+ ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
+ LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
+ wsContainer.Add(ws)
+
+ // Check paths for UI serving
+ if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
+ swaggerPathSlash := config.SwaggerPath
+ // path must end with slash /
+ if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
+ LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
+ swaggerPathSlash += "/"
+ }
+
+ LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
+ wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
+
+ // if a custom static handler is defined, use it
+ } else if config.StaticHandler != nil && config.SwaggerPath != "" {
+ swaggerPathSlash := config.SwaggerPath
+ // path must end with slash /
+ if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
+ LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
+ swaggerPathSlash += "/"
+
+ }
+ LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
+ wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
+
+ } else {
+ LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
+ }
+}
+
+func staticPathFromRoute(r restful.Route) string {
+ static := r.Path
+ bracket := strings.Index(static, "{")
+ if bracket <= 1 { // result cannot be empty
+ return static
+ }
+ if bracket != -1 {
+ static = r.Path[:bracket]
+ }
+ if strings.HasSuffix(static, "/") {
+ return static[:len(static)-1]
+ } else {
+ return static
+ }
+}
+
+func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+ if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
+ // prevent duplicate header
+ if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
+ resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
+ }
+ }
+ chain.ProcessFilter(req, resp)
+}
+
+func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
+ listing := sws.produceListing()
+ resp.WriteAsJson(listing)
+}
+
+func (sws SwaggerService) produceListing() ResourceListing {
+ listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
+ sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
+ ref := Resource{Path: k}
+ if len(v.Apis) > 0 { // use description of first (could still be empty)
+ ref.Description = v.Apis[0].Description
+ }
+ listing.Apis = append(listing.Apis, ref)
+ })
+ return listing
+}
+
+func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
+ decl, ok := sws.produceDeclarations(composeRootPath(req))
+ if !ok {
+ resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
+ return
+ }
+ // unless WebServicesUrl is given
+ if len(sws.config.WebServicesUrl) == 0 {
+ // update base path from the actual request
+ // TODO how to detect https? assume http for now
+ var host string
+ // X-Forwarded-Host or Host or Request.Host
+ hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
+ if !ok || len(hostvalues) == 0 {
+ forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
+ if !ok || len(forwarded) == 0 {
+ // fallback to Host field
+ host = req.Request.Host
+ } else {
+ host = forwarded[0]
+ }
+ } else {
+ host = hostvalues[0]
+ }
+ // inspect Referer for the scheme (http vs https)
+ scheme := "http"
+ if referer := req.Request.Header["Referer"]; len(referer) > 0 {
+ if strings.HasPrefix(referer[0], "https") {
+ scheme = "https"
+ }
+ }
+ decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
+ }
+ resp.WriteAsJson(decl)
+}
+
+func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
+ decls := map[string]ApiDeclaration{}
+ sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
+ decls[k] = v
+ })
+ return decls
+}
+
+func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
+ decl, ok := sws.apiDeclarationMap.At(route)
+ if !ok {
+ return nil, false
+ }
+ decl.BasePath = sws.config.WebServicesUrl
+ return &decl, true
+}
+
+// composeDeclaration uses all routes and parameters to create an ApiDeclaration
+func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
+ decl := ApiDeclaration{
+ SwaggerVersion: swaggerVersion,
+ BasePath: sws.config.WebServicesUrl,
+ ResourcePath: pathPrefix,
+ Models: ModelList{},
+ ApiVersion: ws.Version()}
+
+ // collect any path parameters
+ rootParams := []Parameter{}
+ for _, param := range ws.PathParameters() {
+ rootParams = append(rootParams, asSwaggerParameter(param.Data()))
+ }
+ // aggregate by path
+ pathToRoutes := newOrderedRouteMap()
+ for _, other := range ws.Routes() {
+ if strings.HasPrefix(other.Path, pathPrefix) {
+ if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
+ continue
+ }
+ pathToRoutes.Add(other.Path, other)
+ }
+ }
+ pathToRoutes.Do(func(path string, routes []restful.Route) {
+ api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
+ voidString := "void"
+ for _, route := range routes {
+ operation := Operation{
+ Method: route.Method,
+ Summary: route.Doc,
+ Notes: route.Notes,
+ // Type gets overwritten if there is a write sample
+ DataTypeFields: DataTypeFields{Type: &voidString},
+ Parameters: []Parameter{},
+ Nickname: route.Operation,
+ ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
+
+ operation.Consumes = route.Consumes
+ operation.Produces = route.Produces
+
+ // share root params if any
+ for _, swparam := range rootParams {
+ operation.Parameters = append(operation.Parameters, swparam)
+ }
+ // route specific params
+ for _, param := range route.ParameterDocs {
+ operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
+ }
+
+ sws.addModelsFromRouteTo(&operation, route, &decl)
+ api.Operations = append(api.Operations, operation)
+ }
+ decl.Apis = append(decl.Apis, api)
+ })
+ return decl
+}
+
+func withoutWildcard(path string) string {
+ if strings.HasSuffix(path, ":*}") {
+ return path[0:len(path)-3] + "}"
+ }
+ return path
+}
+
+// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
+func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
+ if route.ResponseErrors == nil {
+ return messages
+ }
+ // sort by code
+ codes := sort.IntSlice{}
+ for code := range route.ResponseErrors {
+ codes = append(codes, code)
+ }
+ codes.Sort()
+ for _, code := range codes {
+ each := route.ResponseErrors[code]
+ message := ResponseMessage{
+ Code: code,
+ Message: each.Message,
+ }
+ if each.Model != nil {
+ st := reflect.TypeOf(each.Model)
+ isCollection, st := detectCollectionType(st)
+ // a collection cannot be used as a response model
+ if !isCollection {
+ modelName := modelBuilder{}.keyFrom(st)
+ modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
+ message.ResponseModel = modelName
+ }
+ }
+ messages = append(messages, message)
+ }
+ return
+}
+
+// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it.
+func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
+ if route.ReadSample != nil {
+ sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
+ }
+ if route.WriteSample != nil {
+ sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
+ }
+}
+
+func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
+ isCollection := false
+ if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
+ st = st.Elem()
+ isCollection = true
+ } else {
+ if st.Kind() == reflect.Ptr {
+ if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
+ st = st.Elem().Elem()
+ isCollection = true
+ }
+ }
+ }
+ return isCollection, st
+}
+
+// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource
+func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
+ mb := modelBuilder{Models: models, Config: &sws.config}
+ if isResponse {
+ sampleType, items := asDataType(sample, &sws.config)
+ operation.Type = sampleType
+ operation.Items = items
+ }
+ mb.addModelFrom(sample)
+}
+
+func asSwaggerParameter(param restful.ParameterData) Parameter {
+ return Parameter{
+ DataTypeFields: DataTypeFields{
+ Type: &param.DataType,
+ Format: asFormat(param.DataType, param.DataFormat),
+ DefaultValue: Special(param.DefaultValue),
+ },
+ Name: param.Name,
+ Description: param.Description,
+ ParamType: asParamType(param.Kind),
+
+ Required: param.Required}
+}
+
+// Between 1 and 7 path parameters are supported
+func composeRootPath(req *restful.Request) string {
+ path := "/" + req.PathParameter("a")
+ for _, name := range []string{"b", "c", "d", "e", "f", "g"} {
+ next := req.PathParameter(name)
+ if next == "" {
+ return path
+ }
+ path = path + "/" + next
+ }
+ return path
+}
+
+func asFormat(dataType string, dataFormat string) string {
+ if dataFormat != "" {
+ return dataFormat
+ }
+ return "" // TODO
+}
+
+func asParamType(kind int) string {
+ switch {
+ case kind == restful.PathParameterKind:
+ return "path"
+ case kind == restful.QueryParameterKind:
+ return "query"
+ case kind == restful.BodyParameterKind:
+ return "body"
+ case kind == restful.HeaderParameterKind:
+ return "header"
+ case kind == restful.FormParameterKind:
+ return "form"
+ }
+ return ""
+}
+
+func asDataType(any interface{}, config *Config) (*string, *Item) {
+ // If it's not a collection, return the suggested model name
+ st := reflect.TypeOf(any)
+ isCollection, st := detectCollectionType(st)
+ modelName := modelBuilder{}.keyFrom(st)
+ // if it's not a collection we are done
+ if !isCollection {
+ return &modelName, nil
+ }
+
+ // XXX: This is not very elegant
+ // We create an Item object referring to the given model
+ models := ModelList{}
+ mb := modelBuilder{Models: &models, Config: config}
+ mb.addModelFrom(any)
+
+ elemTypeName := mb.getElementTypeName(modelName, "", st)
+ item := new(Item)
+ if mb.isPrimitiveType(elemTypeName) {
+ mapped := mb.jsonSchemaType(elemTypeName)
+ item.Type = &mapped
+ } else {
+ item.Ref = &elemTypeName
+ }
+ tmp := "array"
+ return &tmp, item
+}
diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE
new file mode 100644
index 000000000..ece7ec61e
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012,2013 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md
new file mode 100644
index 000000000..cd1f2d0cc
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/README.md
@@ -0,0 +1,74 @@
+go-restful
+==========
+package for building REST-style Web Services using Google Go
+
+[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
+[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
+[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
+
+- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
+
+REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
+
+- GET = Retrieve a representation of a resource
+- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
+- PUT = Create if you are sending the full content of the specified resource (URI).
+- PUT = Update if you are updating the full content of the specified resource.
+- DELETE = Delete if you are requesting the server to delete the resource
+- PATCH = Update partial content of a resource
+- OPTIONS = Get information about the communication options for the request URI
+
+### Example
+
+```Go
+ws := new(restful.WebService)
+ws.
+ Path("/users").
+ Consumes(restful.MIME_XML, restful.MIME_JSON).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ws.Route(ws.GET("/{user-id}").To(u.findUser).
+ Doc("get a user").
+ Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
+ Writes(User{}))
+...
+
+func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+}
+```
+
+[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
+
+### Features
+
+- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
+- Configurable router:
+ - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
+ - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
+- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
+- Response API for writing structs to JSON/XML and setting headers
+- Customizable encoding using EntityReaderWriter registration
+- Filters for intercepting the request &#8594; response flow on Service or Route level (see the sketch after this list)
+- Request-scoped variables using attributes
+- Containers for WebServices on different HTTP endpoints
+- Content encoding (gzip,deflate) of request and response payloads
+- Automatic responses on OPTIONS (using a filter)
+- Automatic CORS request handling (using a filter)
+- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
+- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
+- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
+- Configurable (trace) logging
+- Customizable gzip/deflate readers and writers using CompressorProvider registration
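+
+A minimal filter sketch (the logging shown is illustrative):
+
+```Go
+restful.DefaultContainer.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+ log.Printf("%s %s", req.Request.Method, req.Request.URL)
+ chain.ProcessFilter(req, resp) // continue with the next filter or the route
+})
+```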
+
+### Resources
+
+- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
+- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
+- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
+- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
+
+Type ```git shortlog -s``` for a full list of contributors.
+
+© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome. \ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go
new file mode 100644
index 000000000..220b37712
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compress.go
@@ -0,0 +1,123 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bufio"
+ "compress/gzip"
+ "compress/zlib"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// OBSOLETE: use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
+var EnableContentEncoding = false
+
+// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
+type CompressingResponseWriter struct {
+ writer http.ResponseWriter
+ compressor io.WriteCloser
+ encoding string
+}
+
+// Header is part of the http.ResponseWriter interface
+func (c *CompressingResponseWriter) Header() http.Header {
+ return c.writer.Header()
+}
+
+// WriteHeader is part of the http.ResponseWriter interface
+func (c *CompressingResponseWriter) WriteHeader(status int) {
+ c.writer.WriteHeader(status)
+}
+
+// Write is part of the http.ResponseWriter interface.
+// The data is passed through the compressor.
+func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
+ if c.isCompressorClosed() {
+ return -1, errors.New("Compressing error: tried to write data using closed compressor")
+ }
+ return c.compressor.Write(bytes)
+}
+
+// CloseNotify is part of the http.CloseNotifier interface
+func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
+ return c.writer.(http.CloseNotifier).CloseNotify()
+}
+
+// Close closes the underlying compressor
+func (c *CompressingResponseWriter) Close() error {
+ if c.isCompressorClosed() {
+ return errors.New("Compressing error: tried to close already closed compressor")
+ }
+
+ c.compressor.Close()
+ if ENCODING_GZIP == c.encoding {
+ currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
+ }
+ if ENCODING_DEFLATE == c.encoding {
+ currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
+ }
+ // gc hint needed?
+ c.compressor = nil
+ return nil
+}
+
+func (c *CompressingResponseWriter) isCompressorClosed() bool {
+ return nil == c.compressor
+}
+
+// Hijack implements the http.Hijacker interface.
+// This is especially useful when combining Container.EnableContentEncoding
+// with websockets (for instance gorilla/websocket)
+func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hijacker, ok := c.writer.(http.Hijacker)
+ if !ok {
+ return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
+ }
+ return hijacker.Hijack()
+}
+
+// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
+func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
+ header := httpRequest.Header.Get(HEADER_AcceptEncoding)
+ gi := strings.Index(header, ENCODING_GZIP)
+ zi := strings.Index(header, ENCODING_DEFLATE)
+ // use in order of appearance
+ if gi == -1 {
+ return zi != -1, ENCODING_DEFLATE
+ } else if zi == -1 {
+ return gi != -1, ENCODING_GZIP
+ } else {
+ if gi < zi {
+ return true, ENCODING_GZIP
+ }
+ return true, ENCODING_DEFLATE
+ }
+}
+
+// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
+func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
+ httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
+ c := new(CompressingResponseWriter)
+ c.writer = httpWriter
+ var err error
+ if ENCODING_GZIP == encoding {
+ w := currentCompressorProvider.AcquireGzipWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_GZIP
+ } else if ENCODING_DEFLATE == encoding {
+ w := currentCompressorProvider.AcquireZlibWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_DEFLATE
+ } else {
+ return nil, errors.New("Unknown encoding:" + encoding)
+ }
+ return c, err
+}
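+
+// exampleManualCompression is an illustrative sketch, not part of the library
+// API: a plain handler that compresses its response by hand using the helpers
+// above. The function name and the response body are hypothetical.
+func exampleManualCompression(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+	var writer http.ResponseWriter = httpWriter
+	if doCompress, encoding := wantsCompressedResponse(httpRequest); doCompress {
+		if compressWriter, err := NewCompressingResponseWriter(httpWriter, encoding); err == nil {
+			// Close releases the pooled compressor and must be called when done
+			defer compressWriter.Close()
+			writer = compressWriter
+		}
+	}
+	writer.Write([]byte("hello"))
+}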
diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go
new file mode 100644
index 000000000..ee426010a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressor_cache.go
@@ -0,0 +1,103 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
+// of writers and readers (resources).
+// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
+type BoundedCachedCompressors struct {
+ gzipWriters chan *gzip.Writer
+ gzipReaders chan *gzip.Reader
+ zlibWriters chan *zlib.Writer
+ writersCapacity int
+ readersCapacity int
+}
+
+// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
+func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
+ b := &BoundedCachedCompressors{
+ gzipWriters: make(chan *gzip.Writer, writersCapacity),
+ gzipReaders: make(chan *gzip.Reader, readersCapacity),
+ zlibWriters: make(chan *zlib.Writer, writersCapacity),
+ writersCapacity: writersCapacity,
+ readersCapacity: readersCapacity,
+ }
+ for ix := 0; ix < writersCapacity; ix++ {
+ b.gzipWriters <- newGzipWriter()
+ b.zlibWriters <- newZlibWriter()
+ }
+ for ix := 0; ix < readersCapacity; ix++ {
+ b.gzipReaders <- newGzipReader()
+ }
+ return b
+}
+
+// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
+ var writer *gzip.Writer
+ select {
+ case writer, _ = <-b.gzipWriters:
+ default:
+ // return a new unmanaged one
+ writer = newGzipWriter()
+ }
+ return writer
+}
+
+// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
+ // forget the unmanaged ones
+ if len(b.gzipWriters) < b.writersCapacity {
+ b.gzipWriters <- w
+ }
+}
+
+// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
+ var reader *gzip.Reader
+ select {
+ case reader, _ = <-b.gzipReaders:
+ default:
+ // return a new unmanaged one
+ reader = newGzipReader()
+ }
+ return reader
+}
+
+// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
+ // forget the unmanaged ones
+ if len(b.gzipReaders) < b.readersCapacity {
+ b.gzipReaders <- r
+ }
+}
+
+// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
+ var writer *zlib.Writer
+ select {
+ case writer, _ = <-b.zlibWriters:
+ default:
+ // return a new unmanaged one
+ writer = newZlibWriter()
+ }
+ return writer
+}
+
+// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
+ // forget the unmanaged ones
+ if len(b.zlibWriters) < b.writersCapacity {
+ b.zlibWriters <- w
+ }
+}
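+
+// exampleBoundedCacheRoundTrip is an illustrative sketch, not part of the
+// library: it shows the acquire/release contract of the bounded cache.
+// The capacities (20, 20) are example values.
+func exampleBoundedCacheRoundTrip() {
+	b := NewBoundedCachedCompressors(20, 20)
+	w := b.AcquireGzipWriter() // taken from the cache, or new and unmanaged if all are in use
+	// call w.Reset(yourWriter) before use, as documented by CompressorProvider
+	b.ReleaseGzipWriter(w) // returned to the cache only if there is room
+}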
diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go
new file mode 100644
index 000000000..d866ce64b
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressor_pools.go
@@ -0,0 +1,91 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "compress/gzip"
+ "compress/zlib"
+ "sync"
+)
+
+// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
+type SyncPoolCompessors struct {
+ GzipWriterPool *sync.Pool
+ GzipReaderPool *sync.Pool
+ ZlibWriterPool *sync.Pool
+}
+
+// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
+func NewSyncPoolCompessors() *SyncPoolCompessors {
+ return &SyncPoolCompessors{
+ GzipWriterPool: &sync.Pool{
+ New: func() interface{} { return newGzipWriter() },
+ },
+ GzipReaderPool: &sync.Pool{
+ New: func() interface{} { return newGzipReader() },
+ },
+ ZlibWriterPool: &sync.Pool{
+ New: func() interface{} { return newZlibWriter() },
+ },
+ }
+}
+
+func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
+ return s.GzipWriterPool.Get().(*gzip.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
+ s.GzipWriterPool.Put(w)
+}
+
+func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
+ return s.GzipReaderPool.Get().(*gzip.Reader)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
+ s.GzipReaderPool.Put(r)
+}
+
+func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
+ return s.ZlibWriterPool.Get().(*zlib.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
+ s.ZlibWriterPool.Put(w)
+}
+
+func newGzipWriter() *gzip.Writer {
+ // create with an empty bytes writer; it will be replaced before using the gzipWriter
+ writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
+
+func newGzipReader() *gzip.Reader {
+ // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
+	// we can safely use currentCompressorProvider because it is set on package initialization.
+ w := currentCompressorProvider.AcquireGzipWriter()
+ defer currentCompressorProvider.ReleaseGzipWriter(w)
+ b := new(bytes.Buffer)
+ w.Reset(b)
+ w.Flush()
+ w.Close()
+ reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
+ if err != nil {
+ panic(err.Error())
+ }
+ return reader
+}
+
+func newZlibWriter() *zlib.Writer {
+	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), zlib.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go
new file mode 100644
index 000000000..cb32f7ef5
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressors.go
@@ -0,0 +1,54 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+// CompressorProvider describes a component that can provide compressors for the standard encodings (gzip, zlib).
+type CompressorProvider interface {
+ // Returns a *gzip.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireGzipWriter() *gzip.Writer
+
+	// Releases an acquired *gzip.Writer.
+ ReleaseGzipWriter(w *gzip.Writer)
+
+ // Returns a *gzip.Reader which needs to be released later.
+ AcquireGzipReader() *gzip.Reader
+
+	// Releases an acquired *gzip.Reader.
+	ReleaseGzipReader(r *gzip.Reader)
+
+ // Returns a *zlib.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireZlibWriter() *zlib.Writer
+
+	// Releases an acquired *zlib.Writer.
+ ReleaseZlibWriter(w *zlib.Writer)
+}
+
+// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
+var currentCompressorProvider CompressorProvider
+
+func init() {
+ currentCompressorProvider = NewSyncPoolCompessors()
+}
+
+// CurrentCompressorProvider returns the current CompressorProvider.
+// It is initialized using a SyncPoolCompessors.
+func CurrentCompressorProvider() CompressorProvider {
+ return currentCompressorProvider
+}
+
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
+func SetCompressorProvider(p CompressorProvider) {
+ if p == nil {
+ panic("cannot set compressor provider to nil")
+ }
+ currentCompressorProvider = p
+}
diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go
new file mode 100644
index 000000000..203439c5e
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/constants.go
@@ -0,0 +1,30 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
+
+ HEADER_Allow = "Allow"
+ HEADER_Accept = "Accept"
+ HEADER_Origin = "Origin"
+ HEADER_ContentType = "Content-Type"
+ HEADER_LastModified = "Last-Modified"
+ HEADER_AcceptEncoding = "Accept-Encoding"
+ HEADER_ContentEncoding = "Content-Encoding"
+ HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
+ HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
+ HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
+ HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
+ HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
+ HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
+ HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
+ HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
+
+ ENCODING_GZIP = "gzip"
+ ENCODING_DEFLATE = "deflate"
+)
diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go
new file mode 100644
index 000000000..657d5b6dd
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/container.go
@@ -0,0 +1,366 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
+// The requests are further dispatched to routes of WebServices using a RouteSelector
+type Container struct {
+ webServicesLock sync.RWMutex
+ webServices []*WebService
+ ServeMux *http.ServeMux
+ isRegisteredOnRoot bool
+ containerFilters []FilterFunction
+ doNotRecover bool // default is true
+ recoverHandleFunc RecoverHandleFunction
+ serviceErrorHandleFunc ServiceErrorHandleFunction
+ router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
+ contentEncodingEnabled bool // default is false
+}
+
+// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
+func NewContainer() *Container {
+ return &Container{
+ webServices: []*WebService{},
+ ServeMux: http.NewServeMux(),
+ isRegisteredOnRoot: false,
+ containerFilters: []FilterFunction{},
+ doNotRecover: true,
+ recoverHandleFunc: logStackOnRecover,
+ serviceErrorHandleFunc: writeServiceError,
+ router: CurlyRouter{},
+ contentEncodingEnabled: false}
+}
+
+// RecoverHandleFunction declares functions that can be used to handle a panic situation.
+// The first argument is what recover() returns. The second must be used to communicate an error response.
+type RecoverHandleFunction func(interface{}, http.ResponseWriter)
+
+// RecoverHandler changes the default function (logStackOnRecover) to be called
+// when a panic is detected. DoNotRecover must be set to false for this handler to be invoked.
+func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
+ c.recoverHandleFunc = handler
+}
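+
+// terseRecoverHandler is an illustrative sketch, not part of the library: a
+// RecoverHandleFunction that logs the panic but hides stack details from the
+// client. Install it with c.RecoverHandler(terseRecoverHandler) after calling
+// c.DoNotRecover(false).
+func terseRecoverHandler(panicReason interface{}, httpWriter http.ResponseWriter) {
+	log.Printf("[restful] recovered from panic: %v", panicReason)
+	httpWriter.WriteHeader(http.StatusInternalServerError)
+	httpWriter.Write([]byte("internal server error"))
+}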
+
+// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
+// The first argument is the service error, the second is the request that resulted in the error and
+// the third must be used to communicate an error response.
+type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
+
+// ServiceErrorHandler changes the default function (writeServiceError) to be called
+// when a ServiceError is detected.
+func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
+ c.serviceErrorHandleFunc = handler
+}
+
+// DoNotRecover controls whether panics will be caught to return HTTP 500.
+// If set to true, Route functions are responsible for handling any error situation.
+// Default value is true.
+func (c *Container) DoNotRecover(doNot bool) {
+ c.doNotRecover = doNot
+}
+
+// Router changes the default Router (currently CurlyRouter)
+func (c *Container) Router(aRouter RouteSelector) {
+ c.router = aRouter
+}
+
+// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
+func (c *Container) EnableContentEncoding(enabled bool) {
+ c.contentEncodingEnabled = enabled
+}
+
+// Add a WebService to the Container. It will detect duplicate root paths and call os.Exit(1) in that case.
+func (c *Container) Add(service *WebService) *Container {
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+
+	// if rootPath was not set then initialize it lazily
+ if len(service.rootPath) == 0 {
+ service.Path("/")
+ }
+
+ // cannot have duplicate root paths
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
+ os.Exit(1)
+ }
+ }
+
+ // If not registered on root then add specific mapping
+ if !c.isRegisteredOnRoot {
+ c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
+ }
+ c.webServices = append(c.webServices, service)
+ return c
+}
+
+// addHandler may set a new HandleFunc for the serveMux.
+// This function must run inside the critical region protected by the webServicesLock.
+// It returns true if the handler was registered on root ("/").
+func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
+ pattern := fixedPrefixPath(service.RootPath())
+ // check if root path registration is needed
+ if "/" == pattern || "" == pattern {
+ serveMux.HandleFunc("/", c.dispatch)
+ return true
+ }
+ // detect if registration already exists
+ alreadyMapped := false
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ alreadyMapped = true
+ break
+ }
+ }
+ if !alreadyMapped {
+ serveMux.HandleFunc(pattern, c.dispatch)
+ if !strings.HasSuffix(pattern, "/") {
+ serveMux.HandleFunc(pattern+"/", c.dispatch)
+ }
+ }
+ return false
+}
+
+func (c *Container) Remove(ws *WebService) error {
+ if c.ServeMux == http.DefaultServeMux {
+ errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
+		log.Print(errMsg)
+ return errors.New(errMsg)
+ }
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+ // build a new ServeMux and re-register all WebServices
+ newServeMux := http.NewServeMux()
+ newServices := []*WebService{}
+ newIsRegisteredOnRoot := false
+ for _, each := range c.webServices {
+ if each.rootPath != ws.rootPath {
+ // If not registered on root then add specific mapping
+ if !newIsRegisteredOnRoot {
+ newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
+ }
+ newServices = append(newServices, each)
+ }
+ }
+ c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
+ return nil
+}
+
+// logStackOnRecover is the default RecoverHandleFunction and is called
+// when DoNotRecover is false and no custom RecoverHandler is set for the container.
+// It logs the stacktrace and writes the stacktrace to the response.
+// This may be a security issue as it exposes source code information.
+func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
+ var buffer bytes.Buffer
+ buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
+ for i := 2; ; i += 1 {
+ _, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
+ }
+ log.Print(buffer.String())
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ httpWriter.Write(buffer.Bytes())
+}
+
+// writeServiceError is the default ServiceErrorHandleFunction and is called
+// when a ServiceError is returned during route selection. Default implementation
+// calls resp.WriteErrorString(err.Code, err.Message)
+func writeServiceError(err ServiceError, req *Request, resp *Response) {
+ resp.WriteErrorString(err.Code, err.Message)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+ if httpWriter == nil {
+ panic("httpWriter cannot be nil")
+ }
+ if httpRequest == nil {
+ panic("httpRequest cannot be nil")
+ }
+ c.dispatch(httpWriter, httpRequest)
+}
+
+// dispatch the incoming Http Request to a matching WebService.
+func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+ writer := httpWriter
+
+ // CompressingResponseWriter should be closed after all operations are done
+ defer func() {
+ if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
+ compressWriter.Close()
+ }
+ }()
+
+	// Install panic recovery unless told otherwise
+ if !c.doNotRecover { // catch all for 500 response
+ defer func() {
+ if r := recover(); r != nil {
+ c.recoverHandleFunc(r, writer)
+ return
+ }
+ }()
+ }
+
+ // Detect if compression is needed
+ // assume without compression, test for override
+ if c.contentEncodingEnabled {
+ doCompress, encoding := wantsCompressedResponse(httpRequest)
+ if doCompress {
+ var err error
+ writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+ if err != nil {
+ log.Print("[restful] unable to install compressor: ", err)
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+	// Find the best matching Route; err is non-nil if no match was found
+ var webService *WebService
+ var route *Route
+ var err error
+ func() {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ webService, route, err = c.router.SelectRoute(
+ c.webServices,
+ httpRequest)
+ }()
+ if err != nil {
+ // a non-200 response has already been written
+		// run container filters anyway; they should not touch the response...
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ switch err.(type) {
+ case ServiceError:
+ ser := err.(ServiceError)
+ c.serviceErrorHandleFunc(ser, req, resp)
+ }
+ // TODO
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
+ return
+ }
+ wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
+ // pass through filters (if any)
+ if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
+ // compose filter chain
+ allFilters := []FilterFunction{}
+ allFilters = append(allFilters, c.containerFilters...)
+ allFilters = append(allFilters, webService.filters...)
+ allFilters = append(allFilters, route.Filters...)
+ chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
+ // handle request by route after passing all filters
+ route.Function(wrappedRequest, wrappedResponse)
+ }}
+ chain.ProcessFilter(wrappedRequest, wrappedResponse)
+ } else {
+ // no filters, handle request by route
+ route.Function(wrappedRequest, wrappedResponse)
+ }
+}
+
+// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
+func fixedPrefixPath(pathspec string) string {
+ varBegin := strings.Index(pathspec, "{")
+ if -1 == varBegin {
+ return pathspec
+ }
+ return pathspec[:varBegin]
+}
+
+// ServeHTTP implements net/http.Handler, so a Container can be used as a Handler in an http.Server
+func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
+ c.ServeMux.ServeHTTP(httpwriter, httpRequest)
+}
+
+// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
+func (c *Container) Handle(pattern string, handler http.Handler) {
+ c.ServeMux.Handle(pattern, handler)
+}
+
+// HandleWithFilter registers the handler for the given pattern.
+// Container's filter chain is applied for handler.
+// If a handler already exists for pattern, HandleWithFilter panics.
+func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
+ f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
+ if len(c.containerFilters) == 0 {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ return
+ }
+
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
+ }
+
+ c.Handle(pattern, http.HandlerFunc(f))
+}
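+
+// exampleHandleStatic is an illustrative sketch, not part of the library: it
+// serves a plain http.Handler through the container filter chain. The pattern
+// and directory are example values.
+func exampleHandleStatic(c *Container) {
+	c.HandleWithFilter("/static/", http.FileServer(http.Dir("./static")))
+}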
+
+// Filter appends a container FilterFunction. These are called before dispatching
+// a http.Request to a WebService from the container
+func (c *Container) Filter(filter FilterFunction) {
+ c.containerFilters = append(c.containerFilters, filter)
+}
+
+// RegisteredWebServices returns the collections of added WebServices
+func (c *Container) RegisteredWebServices() []*WebService {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ result := make([]*WebService, len(c.webServices))
+ for ix := range c.webServices {
+ result[ix] = c.webServices[ix]
+ }
+ return result
+}
+
+// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
+func (c *Container) computeAllowedMethods(req *Request) []string {
+ // Go through all RegisteredWebServices() and all its Routes to collect the options
+ methods := []string{}
+ requestPath := req.Request.URL.Path
+ for _, ws := range c.RegisteredWebServices() {
+ matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ finalMatch := matches[len(matches)-1]
+ for _, rt := range ws.Routes() {
+ matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+					if lastMatch == "" || lastMatch == "/" { // include only if the remainder is empty or "/"
+ methods = append(methods, rt.Method)
+ }
+ }
+ }
+ }
+ }
+ // methods = append(methods, "OPTIONS") not sure about this
+ return methods
+}
+
+// newBasicRequestResponse creates a pair of Request,Response from its http versions.
+// It is basic because no parameter or (produces) content-type information is given.
+func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+ resp := NewResponse(httpWriter)
+ resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ return NewRequest(httpRequest), resp
+}
diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go
new file mode 100644
index 000000000..1efeef072
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/cors_filter.go
@@ -0,0 +1,202 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
+// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
+// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
+//
+// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
+// http://enable-cors.org/server.html
+// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
+type CrossOriginResourceSharing struct {
+ ExposeHeaders []string // list of Header names
+ AllowedHeaders []string // list of Header names
+ AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
+ AllowedMethods []string
+ MaxAge int // number of seconds before requiring new Options request
+ CookiesAllowed bool
+ Container *Container
+
+ allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
+}
+
+// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
+// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
+func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if len(origin) == 0 {
+ if trace {
+ traceLogger.Print("no Http header Origin set")
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if !c.isOriginAllowed(origin) { // check whether this origin is allowed
+ if trace {
+ traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if req.Request.Method != "OPTIONS" {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
+ c.doPreflightRequest(req, resp)
+ } else {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+}
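+
+// exampleEnableCORS is an illustrative wiring sketch, not part of the library;
+// the field values are example choices, not defaults.
+func exampleEnableCORS(c *Container) {
+	cors := CrossOriginResourceSharing{
+		ExposeHeaders:  []string{"X-My-Header"},
+		AllowedHeaders: []string{"Content-Type", "Accept"},
+		AllowedMethods: []string{"GET", "POST"},
+		CookiesAllowed: false,
+		Container:      c}
+	c.Filter(cors.Filter)
+}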
+
+func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
+ c.setOptionsHeaders(req, resp)
+ // continue processing the response
+}
+
+func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
+ if len(c.AllowedMethods) == 0 {
+ if c.Container == nil {
+ c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
+ } else {
+ c.AllowedMethods = c.Container.computeAllowedMethods(req)
+ }
+ }
+
+ acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
+ if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestMethod,
+ acrm,
+ c.AllowedMethods)
+ }
+ return
+ }
+ acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+ if len(acrhs) > 0 {
+ for _, each := range strings.Split(acrhs, ",") {
+ if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestHeaders,
+ acrhs,
+ c.AllowedHeaders)
+ }
+ return
+ }
+ }
+ }
+ resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
+ resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
+ c.setOptionsHeaders(req, resp)
+
+ // return http 200 response, no body
+}
+
+func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
+ c.checkAndSetExposeHeaders(resp)
+ c.setAllowOriginHeader(req, resp)
+ c.checkAndSetAllowCredentials(resp)
+ if c.MaxAge > 0 {
+ resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
+ }
+}
+
+func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
+ if len(origin) == 0 {
+ return false
+ }
+ if len(c.AllowedDomains) == 0 {
+ return true
+ }
+
+ allowed := false
+ for _, domain := range c.AllowedDomains {
+ if domain == origin {
+ allowed = true
+ break
+ }
+ }
+
+ if !allowed {
+ if len(c.allowedOriginPatterns) == 0 {
+ // compile allowed domains to allowed origin patterns
+ allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
+ if err != nil {
+ return false
+ }
+ c.allowedOriginPatterns = allowedOriginRegexps
+ }
+
+ for _, pattern := range c.allowedOriginPatterns {
+ if allowed = pattern.MatchString(origin); allowed {
+ break
+ }
+ }
+ }
+
+ return allowed
+}
+
+func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if c.isOriginAllowed(origin) {
+ resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
+ if len(c.ExposeHeaders) > 0 {
+ resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
+ if c.CookiesAllowed {
+ resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
+ }
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
+ for _, each := range allowedMethods {
+ if each == method {
+ return true
+ }
+ }
+ return false
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
+ for _, each := range c.AllowedHeaders {
+		if strings.EqualFold(each, header) {
+ return true
+ }
+ }
+ return false
+}
+
+// compileRegexps takes a list of strings and compiles them into a list of regular expressions.
+func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
+ regexps := []*regexp.Regexp{}
+ for _, regexpStr := range regexpStrings {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return regexps, err
+ }
+ regexps = append(regexps, r)
+ }
+ return regexps, nil
+}
diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go
new file mode 100644
index 000000000..79f1f5aa2
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/curly.go
@@ -0,0 +1,164 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
+type CurlyRouter struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (c CurlyRouter) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
+
+ requestTokens := tokenizePath(httpRequest.URL.Path)
+
+ detectedService := c.detectWebService(requestTokens, webServices)
+ if detectedService == nil {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
+ }
+ return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ candidateRoutes := c.selectRoutes(detectedService, requestTokens)
+ if len(candidateRoutes) == 0 {
+ if trace {
+ traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
+ }
+ return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
+ if selectedRoute == nil {
+ return detectedService, nil, err
+ }
+ return detectedService, selectedRoute, nil
+}
+
+// selectRoutes returns a collection of Routes from a WebService that match the path tokens from the request.
+func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
+ candidates := sortableCurlyRoutes{}
+ for _, each := range ws.routes {
+ matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
+ if matches {
+ candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+ }
+ }
+ sort.Sort(sort.Reverse(candidates))
+ return candidates
+}
+
+// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and how many static path elements there are.
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
+ if len(routeTokens) < len(requestTokens) {
+ // proceed in matching only if last routeToken is wildcard
+ count := len(routeTokens)
+ if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
+ return false, 0, 0
+ }
+ // proceed
+ }
+ for i, routeToken := range routeTokens {
+ if i == len(requestTokens) {
+ // reached end of request path
+ return false, 0, 0
+ }
+ requestToken := requestTokens[i]
+ if strings.HasPrefix(routeToken, "{") {
+ paramCount++
+ if colon := strings.Index(routeToken, ":"); colon != -1 {
+ // match by regex
+ matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
+ if !matchesToken {
+ return false, 0, 0
+ }
+ if matchesRemainder {
+ break
+ }
+ }
+ } else { // no { prefix
+ if requestToken != routeToken {
+ return false, 0, 0
+ }
+ staticCount++
+ }
+ }
+ return true, paramCount, staticCount
+}
+
+// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
+// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
+func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
+ regPart := routeToken[colon+1 : len(routeToken)-1]
+ if regPart == "*" {
+ if trace {
+ traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
+ }
+ return true, true
+ }
+ matched, err := regexp.MatchString(regPart, requestToken)
+ return (matched && err == nil), false
+}
+
+var jsr311Router = RouterJSR311{}
+
+// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
+// headers of the Request. See also RouterJSR311 in jsr311.go
+func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
+ // tracing is done inside detectRoute
+ return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
+}
+
+// detectWebService returns the best matching webService given the list of path tokens.
+// see also computeWebserviceScore
+func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
+ var best *WebService
+ score := -1
+ for _, each := range webServices {
+ matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
+ if matches && (eachScore > score) {
+ best = each
+ score = eachScore
+ }
+ }
+ return best
+}
+
+// computeWebserviceScore returns whether tokens match and
+// the weighted score of the longest matching consecutive tokens from the beginning.
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
+ if len(tokens) > len(requestTokens) {
+ return false, 0
+ }
+ score := 0
+ for i := 0; i < len(tokens); i++ {
+ each := requestTokens[i]
+ other := tokens[i]
+ if len(each) == 0 && len(other) == 0 {
+ score++
+ continue
+ }
+ if len(other) > 0 && strings.HasPrefix(other, "{") {
+ // no empty match
+ if len(each) == 0 {
+ return false, score
+ }
+ score += 1
+ } else {
+ // not a parameter
+ if each != other {
+ return false, score
+ }
+ score += (len(tokens) - i) * 10 //fuzzy
+ }
+ }
+ return true, score
+}
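+
+// Worked example (illustrative): for request tokens ["", "users", "42"] scored
+// against service tokens ["", "users"], the matching empty leading tokens add 1
+// and the static "users" match adds (2-1)*10 = 10, giving a score of 11. Had the
+// second service token been a parameter such as "{id}", it would have added only
+// 1, which is how services with static prefixes win over parameterized ones.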
diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go
new file mode 100644
index 000000000..296f94650
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/curly_route.go
@@ -0,0 +1,52 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
+type curlyRoute struct {
+ route Route
+ paramCount int
+ staticCount int
+}
+
+type sortableCurlyRoutes []curlyRoute
+
+func (s *sortableCurlyRoutes) add(route curlyRoute) {
+ *s = append(*s, route)
+}
+
+func (s sortableCurlyRoutes) routes() (routes []Route) {
+ for _, each := range s {
+ routes = append(routes, each.route) // TODO change return type
+ }
+ return routes
+}
+
+func (s sortableCurlyRoutes) Len() int {
+ return len(s)
+}
+func (s sortableCurlyRoutes) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s sortableCurlyRoutes) Less(i, j int) bool {
+ ci := s[i]
+ cj := s[j]
+
+ // primary key
+ if ci.staticCount < cj.staticCount {
+ return true
+ }
+ if ci.staticCount > cj.staticCount {
+ return false
+ }
+	// secondary key
+ if ci.paramCount < cj.paramCount {
+ return true
+ }
+ if ci.paramCount > cj.paramCount {
+ return false
+ }
+ return ci.route.Path < cj.route.Path
+}
diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go
new file mode 100644
index 000000000..f7c16b01f
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/doc.go
@@ -0,0 +1,185 @@
+/*
+Package restful, a lean package for creating REST-style WebServices without magic.
+
+WebServices and Routes
+
+A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
+Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
+WebServices must be added to a container (see below) in order to handle Http requests from a server.
+
+A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
+This package has the logic to find the best matching Route and if found, call its Function.
+
+ ws := new(restful.WebService)
+ ws.
+ Path("/users").
+ Consumes(restful.MIME_JSON, restful.MIME_XML).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
+
+ ...
+
+ // GET http://localhost:8080/users/1
+ func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+ }
+
+The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
+
+Regular expression matching Routes
+
+A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
+For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
+Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
+This feature requires the use of a CurlyRouter.
+
+Containers
+
+A Container holds a collection of WebServices, Filters and an http.ServeMux for multiplexing http requests.
+Using the statements restful.Add(...) and restful.Filter(...) will register WebServices and Filters with the Default Container.
+The Default container of go-restful uses the http.DefaultServeMux.
+You can create your own Container and create a new http.Server for that particular container.
+
+ container := restful.NewContainer()
+ server := &http.Server{Addr: ":8081", Handler: container}
+
+Filters
+
+A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
+You can use filters to perform generic logging, measurement, authentication, redirects, setting response headers, etc.
+In the restful package there are three hooks into the request/response flow where filters can be added.
+Each filter must define a FilterFunction:
+
+ func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
+
+Use the following statement to pass the request/response pair to the next filter or RouteFunction
+
+ chain.ProcessFilter(req, resp)
+
+Container Filters
+
+These are processed before any registered WebService.
+
+ // install a (global) filter for the default container (processed before any webservice)
+ restful.Filter(globalLogging)
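+
+A minimal sketch of such a filter (the body is illustrative; it assumes the standard library log package):
+
+	func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+		log.Printf("%s %s", req.Request.Method, req.Request.URL)
+		chain.ProcessFilter(req, resp)
+	}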
+
+WebService Filters
+
+These are processed before any Route of a WebService.
+
+ // install a webservice filter (processed before any route)
+ ws.Filter(webserviceLogging).Filter(measureTime)
+
+
+Route Filters
+
+These are processed before calling the function associated with the Route.
+
+ // install 2 chained route filters (processed before calling findUser)
+ ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
+
+Response Encoding
+
+Two encodings are supported: gzip and deflate. To enable this for all responses:
+
+ restful.DefaultContainer.EnableContentEncoding(true)
+
+If an Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
+Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
+
+OPTIONS support
+
+By installing a pre-defined container filter, your WebService(s) can respond to the OPTIONS Http request.
+
+ Filter(OPTIONSFilter())
+
+CORS
+
+By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
+
+ cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
+ Filter(cors.Filter)
+
+Error Handling
+
+Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
+For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
+
+ 400: Bad Request
+
+If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
+
+ 404: Not Found
+
+Despite a valid URI, the resource requested may not be available
+
+ 500: Internal Server Error
+
+If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
+
+ 405: Method Not Allowed
+
+The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
+
+ 406: Not Acceptable
+
+The request either has no Accept header, or has one that is unknown for this operation.
+
+ 415: Unsupported Media Type
+
+The request either has no Content-Type header, or has one that is unknown for this operation.
+
+ServiceError
+
+In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
+
+Performance options
+
+This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
+
+ restful.DefaultContainer.DoNotRecover(false)
+
+DoNotRecover controls whether panics will be caught to return HTTP 500.
+If set to false, the container will recover from panics.
+Default value is true
+
+ restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
+
+If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
+Because writers are expensive structures, performance improves further when using a preloaded cache. You can also inject your own implementation.
+
+Troubleshooting
+
+This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
+Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger) instance, such as:
+
+ restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
+
+Logging
+
+The restful.SetLogger() method allows you to override the logger used by the package. By default restful
+uses the standard library `log` package and logs to stdout. Different logging packages are supported as
+long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
+preferred package is simple.
+
+Resources
+
+[project]: https://github.com/emicklei/go-restful
+
+[examples]: https://github.com/emicklei/go-restful/blob/master/examples
+
+[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
+
+[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
+
+(c) 2012-2015, http://ernestmicklei.com. MIT License
+*/
+package restful
diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go
new file mode 100644
index 000000000..6ecf6c7f8
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go
@@ -0,0 +1,163 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "strings"
+ "sync"
+)
+
+// EntityReaderWriter can read and write values using an encoding such as JSON or XML.
+type EntityReaderWriter interface {
+ // Read a serialized version of the value from the request.
+	// The Request may have a decompressing reader, depending on Content-Encoding.
+ Read(req *Request, v interface{}) error
+
+ // Write a serialized version of the value on the response.
+	// The Response may have a compressing writer, depending on Accept-Encoding.
+	// The status should be a valid Http Status code.
+ Write(resp *Response, status int, v interface{}) error
+}
+
+// entityAccessRegistry is a singleton
+var entityAccessRegistry = &entityReaderWriters{
+ protection: new(sync.RWMutex),
+ accessors: map[string]EntityReaderWriter{},
+}
+
+// entityReaderWriters associates MIME to an EntityReaderWriter
+type entityReaderWriters struct {
+ protection *sync.RWMutex
+ accessors map[string]EntityReaderWriter
+}
+
+func init() {
+ RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
+ RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
+}
+
+// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
+func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
+ entityAccessRegistry.protection.Lock()
+ defer entityAccessRegistry.protection.Unlock()
+ entityAccessRegistry.accessors[mime] = erw
+}
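+
+// An illustrative sketch, not part of the library: a custom accessor for
+// "text/plain" payloads that only handles *string values. It assumes the
+// "errors" and "io/ioutil" packages would be added to the imports above.
+//
+//	type plainTextAccess struct{}
+//
+//	func (e plainTextAccess) Read(req *Request, v interface{}) error {
+//		s, ok := v.(*string)
+//		if !ok {
+//			return errors.New("text/plain accessor expects *string")
+//		}
+//		b, err := ioutil.ReadAll(req.Request.Body)
+//		if err != nil {
+//			return err
+//		}
+//		*s = string(b)
+//		return nil
+//	}
+//
+//	func (e plainTextAccess) Write(resp *Response, status int, v interface{}) error {
+//		resp.Header().Set(HEADER_ContentType, "text/plain")
+//		resp.WriteHeader(status)
+//		_, err := resp.Write([]byte(v.(string)))
+//		return err
+//	}
+//
+// It would be registered with: RegisterEntityAccessor("text/plain", plainTextAccess{})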
+
+// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
+// This package is already initialized with such an accessor using the MIME_JSON contentType.
+func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
+ return entityJSONAccess{ContentType: contentType}
+}
+
+// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
+// This package is already initialized with such an accessor using the MIME_XML contentType.
+func NewEntityAccessorXML(contentType string) EntityReaderWriter {
+ return entityXMLAccess{ContentType: contentType}
+}
+
+// accessorAt returns the registered ReaderWriter for this MIME type.
+func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
+ r.protection.RLock()
+ defer r.protection.RUnlock()
+ er, ok := r.accessors[mime]
+ if !ok {
+ // retry with reverse lookup
+ // more expensive but we are in an exceptional situation anyway
+ for k, v := range r.accessors {
+ if strings.Contains(mime, k) {
+ return v, true
+ }
+ }
+ }
+ return er, ok
+}
+
+// entityXMLAccess is an EntityReaderWriter for XML encoding
+type entityXMLAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshals the value from XML
+func (e entityXMLAccess) Read(req *Request, v interface{}) error {
+ return xml.NewDecoder(req.Request.Body).Decode(v)
+}
+
+// Write marshals the value to XML and sets the Content-Type Header.
+func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeXML(resp, status, e.ContentType, v)
+}
+
+// writeXML marshals the value to XML and sets the Content-Type Header.
+func writeXML(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := xml.MarshalIndent(v, " ", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write([]byte(xml.Header))
+ if err != nil {
+ return err
+ }
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return xml.NewEncoder(resp).Encode(v)
+}
+
+// entityJSONAccess is an EntityReaderWriter for JSON encoding
+type entityJSONAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshals the value from JSON
+func (e entityJSONAccess) Read(req *Request, v interface{}) error {
+ decoder := json.NewDecoder(req.Request.Body)
+ decoder.UseNumber()
+ return decoder.Decode(v)
+}
+
+// Write marshals the value to JSON and sets the Content-Type Header.
+func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeJSON(resp, status, e.ContentType, v)
+}
+
+// writeJSON marshals the value to JSON and sets the Content-Type Header.
+func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := json.MarshalIndent(v, " ", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return json.NewEncoder(resp).Encode(v)
+}
diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go
new file mode 100644
index 000000000..c23bfb591
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/filter.go
@@ -0,0 +1,35 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
+type FilterChain struct {
+ Filters []FilterFunction // ordered list of FilterFunction
+ Index int // index into filters that is currently in progress
+ Target RouteFunction // function to call after passing all filters
+}
+
+// ProcessFilter passes the request/response pair to the next of the Filters, or to the Target once all Filters have run.
+// Each filter can decide to proceed to the next Filter or handle the Response itself.
+func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
+ if f.Index < len(f.Filters) {
+ f.Index++
+ f.Filters[f.Index-1](request, response, f)
+ } else {
+ f.Target(request, response)
+ }
+}
+
+// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on control and eventually call the RouteFunction
+type FilterFunction func(*Request, *Response, *FilterChain)
+
+// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
+// See examples/restful-no-cache-filter.go for usage
+func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
+ resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
+ resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
+ resp.Header().Set("Expires", "0") // Proxies.
+ chain.ProcessFilter(req, resp)
+}
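+
+// Illustrative usage sketch (ws is a hypothetical *WebService):
+//
+//	ws.Filter(NoBrowserCacheFilter)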
diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go
new file mode 100644
index 000000000..511444ac6
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/jsr311.go
@@ -0,0 +1,248 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "sort"
+)
+
+// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
+// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
+// RouterJSR311 implements the Router interface.
+// Concept of locators is not implemented.
+type RouterJSR311 struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (r RouterJSR311) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
+
+ // Identify the root resource class (WebService)
+ dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
+ if err != nil {
+ return nil, nil, NewError(http.StatusNotFound, "")
+ }
+ // Obtain the set of candidate methods (Routes)
+ routes := r.selectRoutes(dispatcher, finalMatch)
+ if len(routes) == 0 {
+ return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+
+ // Identify the method (Route) that will handle the request
+ route, err := r.detectRoute(routes, httpRequest)
+ return dispatcher, route, err
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
+ // http method
+ methodOk := []Route{}
+ for _, each := range routes {
+ if httpRequest.Method == each.Method {
+ methodOk = append(methodOk, each)
+ }
+ }
+ if len(methodOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
+ }
+ return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
+ }
+ inputMediaOk := methodOk
+
+ // content-type
+ contentType := httpRequest.Header.Get(HEADER_ContentType)
+ inputMediaOk = []Route{}
+ for _, each := range methodOk {
+ if each.matchesContentType(contentType) {
+ inputMediaOk = append(inputMediaOk, each)
+ }
+ }
+ if len(inputMediaOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
+ }
+ return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
+ }
+
+ // accept
+ outputMediaOk := []Route{}
+ accept := httpRequest.Header.Get(HEADER_Accept)
+ if len(accept) == 0 {
+ accept = "*/*"
+ }
+ for _, each := range inputMediaOk {
+ if each.matchesAccept(accept) {
+ outputMediaOk = append(outputMediaOk, each)
+ }
+ }
+ if len(outputMediaOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
+ }
+ return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
+ }
+ // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
+ return &outputMediaOk[0], nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// n/m > n/* > */*
+func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
+ // TODO
+ return &routes[0]
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
+func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
+ filtered := &sortableRouteCandidates{}
+ for _, each := range dispatcher.Routes() {
+ pathExpr := each.pathExpr
+ matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+ if len(lastMatch) == 0 || lastMatch == "/" { // include only when the remaining value is empty or "/"
+ filtered.candidates = append(filtered.candidates,
+ routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
+ }
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
+ }
+ return []Route{}
+ }
+ sort.Sort(sort.Reverse(filtered))
+
+ // select the remaining candidates whose expression also matches the path remainder
+ matchingRoutes := []Route{filtered.candidates[0].route}
+ for c := 1; c < len(filtered.candidates); c++ {
+ each := filtered.candidates[c]
+ if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
+ matchingRoutes = append(matchingRoutes, each.route)
+ }
+ }
+ return matchingRoutes
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
+func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
+ filtered := &sortableDispatcherCandidates{}
+ for _, each := range dispatchers {
+ matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ filtered.candidates = append(filtered.candidates,
+ dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
+ }
+ return nil, "", errors.New("not found")
+ }
+ sort.Sort(sort.Reverse(filtered))
+ return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
+}
+
+// Types and functions to support the sorting of Routes
+
+type routeCandidate struct {
+ route Route
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (meaning those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not '([^/]+?)')
+}
+
+func (r routeCandidate) expressionToMatch() string {
+ return r.route.pathExpr.Source
+}
+
+func (r routeCandidate) String() string {
+ return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
+}
+
+type sortableRouteCandidates struct {
+ candidates []routeCandidate
+}
+
+func (rcs *sortableRouteCandidates) Len() int {
+ return len(rcs.candidates)
+}
+func (rcs *sortableRouteCandidates) Swap(i, j int) {
+ rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
+}
+func (rcs *sortableRouteCandidates) Less(i, j int) bool {
+ ci := rcs.candidates[i]
+ cj := rcs.candidates[j]
+ // primary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // secondary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // tertiary key
+ if ci.nonDefaultCount < cj.nonDefaultCount {
+ return true
+ }
+ if ci.nonDefaultCount > cj.nonDefaultCount {
+ return false
+ }
+ // quaternary key ("source" is interpreted as Path)
+ return ci.route.Path < cj.route.Path
+}
+
+// Types and functions to support the sorting of Dispatchers
+
+type dispatcherCandidate struct {
+ dispatcher *WebService
+ finalMatch string
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (meaning those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not '([^/]+?)')
+}
+type sortableDispatcherCandidates struct {
+ candidates []dispatcherCandidate
+}
+
+func (dc *sortableDispatcherCandidates) Len() int {
+ return len(dc.candidates)
+}
+func (dc *sortableDispatcherCandidates) Swap(i, j int) {
+ dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
+}
+func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
+ ci := dc.candidates[i]
+ cj := dc.candidates[j]
+ // primary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // secondary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // tertiary key
+ return ci.nonDefaultCount < cj.nonDefaultCount
+}
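
Both Less implementations order ascending, and the callers wrap the collections in sort.Reverse so the highest-ranked candidate ends up at index 0. A self-contained sketch of that idiom:

    package main

    import (
        "fmt"
        "sort"
    )

    type byLiterals []int

    func (s byLiterals) Len() int           { return len(s) }
    func (s byLiterals) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
    func (s byLiterals) Less(i, j int) bool { return s[i] < s[j] }

    func main() {
        counts := []int{3, 10, 7} // e.g. literal counts of three candidate routes
        sort.Sort(sort.Reverse(byLiterals(counts)))
        fmt.Println(counts) // [10 7 3]: the highest literal count wins
    }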
diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go
new file mode 100644
index 000000000..6cd44c7a5
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/log/log.go
@@ -0,0 +1,34 @@
+package log
+
+import (
+ stdlog "log"
+ "os"
+)
+
+// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+}
+
+var Logger StdLogger
+
+func init() {
+ // default Logger
+ SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
+}
+
+// SetLogger sets the logger for this package
+func SetLogger(customLogger StdLogger) {
+ Logger = customLogger
+}
+
+// Print delegates to the Logger
+func Print(v ...interface{}) {
+ Logger.Print(v...)
+}
+
+// Printf delegates to the Logger
+func Printf(format string, v ...interface{}) {
+ Logger.Printf(format, v...)
+}
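
Any value with Print and Printf satisfies StdLogger, so a stdlib *log.Logger drops in directly. A sketch that redirects the package log (prefix and destination are arbitrary choices here):

    package main

    import (
        stdlog "log"
        "os"

        "github.com/emicklei/go-restful/log"
    )

    func main() {
        // replace the default stderr logger with a stdout one
        log.SetLogger(stdlog.New(os.Stdout, "[api] ", stdlog.LstdFlags))
        log.Printf("logger replaced")
    }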
diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go
new file mode 100644
index 000000000..3f1c4db86
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/logger.go
@@ -0,0 +1,32 @@
+package restful
+
+// Copyright 2014 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+import (
+ "github.com/emicklei/go-restful/log"
+)
+
+var trace bool = false
+var traceLogger log.StdLogger
+
+func init() {
+ traceLogger = log.Logger // use the package logger by default
+}
+
+// TraceLogger enables detailed logging of Http request matching and filter invocation. Tracing is disabled by default.
+// You may call EnableTracing() directly to turn trace logging to the package-wide logger on and off.
+func TraceLogger(logger log.StdLogger) {
+ traceLogger = logger
+ EnableTracing(logger != nil)
+}
+
+// SetLogger exposes the setter for the global logger on the top-level package
+func SetLogger(customLogger log.StdLogger) {
+ log.SetLogger(customLogger)
+}
+
+// EnableTracing can be used to turn trace logging on and off.
+func EnableTracing(enabled bool) {
+ trace = enabled
+}
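
A sketch of switching tracing on; TraceLogger both stores the logger and enables tracing in one call (the prefix is an arbitrary choice):

    package main

    import (
        stdlog "log"
        "os"

        restful "github.com/emicklei/go-restful"
    )

    func main() {
        // route matching and filter invocation are now logged in detail
        restful.TraceLogger(stdlog.New(os.Stderr, "[restful-trace] ", stdlog.LstdFlags))
    }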
diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go
new file mode 100644
index 000000000..d7ea2b615
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/mime.go
@@ -0,0 +1,45 @@
+package restful
+
+import (
+ "strconv"
+ "strings"
+)
+
+type mime struct {
+ media string
+ quality float64
+}
+
+// insertMime adds a mime to a list and keeps it sorted by quality.
+func insertMime(l []mime, e mime) []mime {
+ for i, each := range l {
+ // if the current mime has lower quality than the new one, insert the new one before it
+ if e.quality > each.quality {
+ left := append([]mime{}, l[0:i]...)
+ return append(append(left, e), l[i:]...)
+ }
+ }
+ return append(l, e)
+}
+
+// sortedMimes returns a list of mimes sorted (descending) by their specified quality.
+func sortedMimes(accept string) (sorted []mime) {
+ for _, each := range strings.Split(accept, ",") {
+ typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
+ if len(typeAndQuality) == 1 {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+ } else {
+ // take factor
+ parts := strings.Split(typeAndQuality[1], "=")
+ if len(parts) == 2 {
+ f, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ traceLogger.Printf("unable to parse quality in %s, %v", each, err)
+ } else {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], f})
+ }
+ }
+ }
+ }
+ return
+}
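
For an Accept header such as "text/plain; q=0.5, application/json", sortedMimes puts application/json first because its implied quality is 1.0. A standalone sketch of the q-factor parsing step (the whitespace handling is illustrative):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        part := "text/plain; q=0.5"
        typeAndQuality := strings.Split(strings.Trim(part, " "), ";")
        q := 1.0 // default quality when no q-factor is given
        if len(typeAndQuality) == 2 {
            kv := strings.Split(strings.TrimSpace(typeAndQuality[1]), "=")
            if len(kv) == 2 {
                if f, err := strconv.ParseFloat(kv[1], 64); err == nil {
                    q = f
                }
            }
        }
        fmt.Println(typeAndQuality[0], q) // text/plain 0.5
    }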
diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go
new file mode 100644
index 000000000..4514eadcf
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/options_filter.go
@@ -0,0 +1,26 @@
+package restful
+
+import "strings"
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// As for any filter, you can also install it for a particular WebService within a Container.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
+ if "OPTIONS" != req.Request.Method {
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
+}
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func OPTIONSFilter() FilterFunction {
+ return DefaultContainer.OPTIONSFilter
+}
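
A sketch of installing the filter container-wide; Container.Filter, the container-level filter hook assumed here, is defined elsewhere in this package rather than in this hunk:

    package main

    import (
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    func main() {
        // answer OPTIONS requests with an Allow header before routing
        restful.DefaultContainer.Filter(restful.OPTIONSFilter())
        http.ListenAndServe(":8080", nil)
    }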
diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go
new file mode 100644
index 000000000..e11c8162a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/parameter.go
@@ -0,0 +1,114 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ // PathParameterKind = indicator of Request parameter type "path"
+ PathParameterKind = iota
+
+ // QueryParameterKind = indicator of Request parameter type "query"
+ QueryParameterKind
+
+ // BodyParameterKind = indicator of Request parameter type "body"
+ BodyParameterKind
+
+ // HeaderParameterKind = indicator of Request parameter type "header"
+ HeaderParameterKind
+
+ // FormParameterKind = indicator of Request parameter type "form"
+ FormParameterKind
+)
+
+// Parameter is for documenting a parameter used in a Http Request
+// ParameterData kinds are Path, Query and Body
+type Parameter struct {
+ data *ParameterData
+}
+
+// ParameterData represents the state of a Parameter.
+// It is made public to make it accessible to e.g. the Swagger package.
+type ParameterData struct {
+ Name, Description, DataType, DataFormat string
+ Kind int
+ Required bool
+ AllowableValues map[string]string
+ AllowMultiple bool
+ DefaultValue string
+}
+
+// Data returns the state of the Parameter
+func (p *Parameter) Data() ParameterData {
+ return *p.data
+}
+
+// Kind returns the parameter type indicator (see const for valid values)
+func (p *Parameter) Kind() int {
+ return p.data.Kind
+}
+
+func (p *Parameter) bePath() *Parameter {
+ p.data.Kind = PathParameterKind
+ return p
+}
+func (p *Parameter) beQuery() *Parameter {
+ p.data.Kind = QueryParameterKind
+ return p
+}
+func (p *Parameter) beBody() *Parameter {
+ p.data.Kind = BodyParameterKind
+ return p
+}
+
+func (p *Parameter) beHeader() *Parameter {
+ p.data.Kind = HeaderParameterKind
+ return p
+}
+
+func (p *Parameter) beForm() *Parameter {
+ p.data.Kind = FormParameterKind
+ return p
+}
+
+// Required sets the required field and returns the receiver
+func (p *Parameter) Required(required bool) *Parameter {
+ p.data.Required = required
+ return p
+}
+
+// AllowMultiple sets the allowMultiple field and returns the receiver
+func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
+ p.data.AllowMultiple = multiple
+ return p
+}
+
+// AllowableValues sets the allowableValues field and returns the receiver
+func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
+ p.data.AllowableValues = values
+ return p
+}
+
+// DataType sets the dataType field and returns the receiver
+func (p *Parameter) DataType(typeName string) *Parameter {
+ p.data.DataType = typeName
+ return p
+}
+
+// DataFormat sets the dataFormat field for Swagger UI
+func (p *Parameter) DataFormat(formatName string) *Parameter {
+ p.data.DataFormat = formatName
+ return p
+}
+
+// DefaultValue sets the default value field and returns the receiver
+func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
+ p.data.DefaultValue = stringRepresentation
+ return p
+}
+
+// Description sets the description value field and returns the receiver
+func (p *Parameter) Description(doc string) *Parameter {
+ p.data.Description = doc
+ return p
+}
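
The unexported be* setters are driven by the constructor functions (PathParameter, QueryParameter, ...) declared later in web_service.go; the exported setters all chain. A sketch of documenting an optional paging parameter (pageParam is a hypothetical helper and assumes the restful import):

    func pageParam() *restful.Parameter {
        return restful.QueryParameter("page", "page number of the result set").
            DataType("integer").
            DefaultValue("1").
            Required(false)
    }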
diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go
new file mode 100644
index 000000000..a921e6f22
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/path_expression.go
@@ -0,0 +1,69 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// pathExpression holds a compiled path expression (RegExp) needed to match against
+// Http request paths and to extract path parameter values.
+type pathExpression struct {
+ LiteralCount int // the number of literal characters (meaning those not resulting from template variable substitution)
+ VarCount int // the number of named parameters (enclosed by {}) in the path
+ Matcher *regexp.Regexp
+ Source string // the regular expression as generated from the path template
+ tokens []string
+}
+
+// newPathExpression creates a pathExpression from the input URL path template.
+// Returns an error if the path is invalid.
+func newPathExpression(path string) (*pathExpression, error) {
+ expression, literalCount, varCount, tokens := templateToRegularExpression(path)
+ compiled, err := regexp.Compile(expression)
+ if err != nil {
+ return nil, err
+ }
+ return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
+func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
+ var buffer bytes.Buffer
+ buffer.WriteString("^")
+ //tokens = strings.Split(template, "/")
+ tokens = tokenizePath(template)
+ for _, each := range tokens {
+ if each == "" {
+ continue
+ }
+ buffer.WriteString("/")
+ if strings.HasPrefix(each, "{") {
+ // check for regular expression in variable
+ colon := strings.Index(each, ":")
+ if colon != -1 {
+ // extract expression
+ paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
+ if paramExpr == "*" { // special case
+ buffer.WriteString("(.*)")
+ } else {
+ buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
+ }
+ } else {
+ // plain var
+ buffer.WriteString("([^/]+?)")
+ }
+ varCount += 1
+ } else {
+ literalCount += len(each)
+ encoded := each // TODO URI encode
+ buffer.WriteString(regexp.QuoteMeta(encoded))
+ }
+ }
+ return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
+}
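
As a worked example of the rules above: the template /users/{id}/files/{path:*} should compile to ^/users/([^/]+?)/files/(.*)(/.*)?$ with literalCount 10 ("users" plus "files") and varCount 2. A standalone check of that expression:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // the expression templateToRegularExpression would produce for
        // "/users/{id}/files/{path:*}"
        expr := regexp.MustCompile(`^/users/([^/]+?)/files/(.*)(/.*)?$`)
        fmt.Println(expr.FindStringSubmatch("/users/42/files/a/b.txt"))
        // [/users/42/files/a/b.txt 42 a/b.txt ]
    }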
diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go
new file mode 100644
index 000000000..8c23af12c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/request.go
@@ -0,0 +1,113 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/zlib"
+ "net/http"
+)
+
+var defaultRequestContentType string
+
+// Request is a wrapper for a http Request that provides convenience methods
+type Request struct {
+ Request *http.Request
+ pathParameters map[string]string
+ attributes map[string]interface{} // for storing request-scoped values
+ selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
+}
+
+func NewRequest(httpRequest *http.Request) *Request {
+ return &Request{
+ Request: httpRequest,
+ pathParameters: map[string]string{},
+ attributes: map[string]interface{}{},
+ } // empty parameters, attributes
+}
+
+// If ContentType is missing or */* is given then fall back to this type, otherwise
+// a "Unable to unmarshal content of type:" response is returned.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultRequestContentType(restful.MIME_JSON)
+func DefaultRequestContentType(mime string) {
+ defaultRequestContentType = mime
+}
+
+// PathParameter accesses the Path parameter value by its name
+func (r *Request) PathParameter(name string) string {
+ return r.pathParameters[name]
+}
+
+// PathParameters accesses the Path parameter values
+func (r *Request) PathParameters() map[string]string {
+ return r.pathParameters
+}
+
+// QueryParameter returns the (first) Query parameter value by its name
+func (r *Request) QueryParameter(name string) string {
+ return r.Request.FormValue(name)
+}
+
+// BodyParameter parses the body of the request (typically once, for a POST or a PUT) and returns the value of the given name or an error.
+func (r *Request) BodyParameter(name string) (string, error) {
+ err := r.Request.ParseForm()
+ if err != nil {
+ return "", err
+ }
+ return r.Request.PostFormValue(name), nil
+}
+
+// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
+func (r *Request) HeaderParameter(name string) string {
+ return r.Request.Header.Get(name)
+}
+
+// ReadEntity checks the Content-Type header and reads the content into the entityPointer.
+func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
+ contentType := r.Request.Header.Get(HEADER_ContentType)
+ contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
+
+ // check if the request body needs decompression
+ if ENCODING_GZIP == contentEncoding {
+ gzipReader := currentCompressorProvider.AcquireGzipReader()
+ defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
+ gzipReader.Reset(r.Request.Body)
+ r.Request.Body = gzipReader
+ } else if ENCODING_DEFLATE == contentEncoding {
+ zlibReader, err := zlib.NewReader(r.Request.Body)
+ if err != nil {
+ return err
+ }
+ r.Request.Body = zlibReader
+ }
+
+ // lookup the EntityReader, use defaultRequestContentType if needed and provided
+ entityReader, ok := entityAccessRegistry.accessorAt(contentType)
+ if !ok {
+ if len(defaultRequestContentType) != 0 {
+ entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
+ }
+ if !ok {
+ return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
+ }
+ }
+ return entityReader.Read(r, entityPointer)
+}
+
+// SetAttribute adds or replaces the attribute with the given value.
+func (r *Request) SetAttribute(name string, value interface{}) {
+ r.attributes[name] = value
+}
+
+// Attribute returns the value associated to the given name. Returns nil if absent.
+func (r Request) Attribute(name string) interface{} {
+ return r.attributes[name]
+}
+
+// SelectedRoutePath returns the root path + route path that matched the request, e.g. /meetings/{id}/attendees
+func (r Request) SelectedRoutePath() string {
+ return r.selectedRoutePath
+}
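
A sketch of a RouteFunction built on ReadEntity; user and createUser are hypothetical names, and the snippet assumes the net/http and restful imports:

    type user struct {
        Name string `json:"name"`
    }

    func createUser(req *restful.Request, resp *restful.Response) {
        u := new(user)
        if err := req.ReadEntity(u); err != nil {
            resp.WriteError(http.StatusBadRequest, err) // 400 plus the reason
            return
        }
        resp.WriteHeaderAndEntity(http.StatusCreated, u)
    }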
diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go
new file mode 100644
index 000000000..3b33ab220
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/response.go
@@ -0,0 +1,236 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "net/http"
+)
+
+// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
+var DefaultResponseMimeType string
+
+// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
+var PrettyPrintResponses = true
+
+// Response is a wrapper on the actual http ResponseWriter
+// It provides several convenience methods to prepare and write response content.
+type Response struct {
+ http.ResponseWriter
+ requestAccept string // mime-type what the Http Request says it wants to receive
+ routeProduces []string // mime-types what the Route says it can produce
+ statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
+ contentLength int // number of bytes written for the response body
+ prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
+ err error // err property is kept when WriteError is called
+}
+
+// NewResponse creates a new response based on a http ResponseWriter.
+func NewResponse(httpWriter http.ResponseWriter) *Response {
+ return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
+}
+
+// DefaultResponseContentType sets a default.
+// If Accept header matching fails, fall back to this type.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultResponseContentType(restful.MIME_JSON)
+func DefaultResponseContentType(mime string) {
+ DefaultResponseMimeType = mime
+}
+
+// InternalServerError writes the StatusInternalServerError header.
+// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
+func (r Response) InternalServerError() Response {
+ r.WriteHeader(http.StatusInternalServerError)
+ return r
+}
+
+// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
+func (r *Response) PrettyPrint(bePretty bool) {
+ r.prettyPrint = bePretty
+}
+
+// AddHeader is a shortcut for .Header().Add(header,value)
+func (r Response) AddHeader(header string, value string) Response {
+ r.Header().Add(header, value)
+ return r
+}
+
+// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
+func (r *Response) SetRequestAccepts(mime string) {
+ r.requestAccept = mime
+}
+
+// EntityWriter returns the registered EntityWriter that the entity (requested resource)
+// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
+// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
+func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
+ sorted := sortedMimes(r.requestAccept)
+ for _, eachAccept := range sorted {
+ for _, eachProduce := range r.routeProduces {
+ if eachProduce == eachAccept.media {
+ if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
+ return w, true
+ }
+ }
+ }
+ if eachAccept.media == "*/*" {
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ }
+ }
+ // if requestAccept is empty
+ writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
+ if !ok {
+ // if not registered then fallback to the defaults (if set)
+ if DefaultResponseMimeType == MIME_JSON {
+ return entityAccessRegistry.accessorAt(MIME_JSON)
+ }
+ if DefaultResponseMimeType == MIME_XML {
+ return entityAccessRegistry.accessorAt(MIME_XML)
+ }
+ // Fallback to whatever the route says it can produce.
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ if trace {
+ traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
+ }
+ }
+ return writer, ok
+}
+
+// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
+func (r *Response) WriteEntity(value interface{}) error {
+ return r.WriteHeaderAndEntity(http.StatusOK, value)
+}
+
+// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
+// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
+// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
+// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
+// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
+// Current implementation ignores any q-parameters in the Accept Header.
+// Returns an error if the value could not be written on the response.
+func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
+ writer, ok := r.EntityWriter()
+ if !ok {
+ r.WriteHeader(http.StatusNotAcceptable)
+ return nil
+ }
+ return writer.Write(r, status, value)
+}
+
+// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsXml(value interface{}) error {
+ return writeXML(r, http.StatusOK, MIME_XML, value)
+}
+
+// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
+ return writeXML(r, status, MIME_XML, value)
+}
+
+// WriteAsJson is a convenience method for writing a value in json.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsJson(value interface{}) error {
+ return writeJSON(r, http.StatusOK, MIME_JSON, value)
+}
+
+// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteJson(value interface{}, contentType string) error {
+ return writeJSON(r, http.StatusOK, contentType, value)
+}
+
+// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
+ return writeJSON(r, status, contentType, value)
+}
+
+// WriteError writes the http status and the error string on the response.
+func (r *Response) WriteError(httpStatus int, err error) error {
+ r.err = err
+ return r.WriteErrorString(httpStatus, err.Error())
+}
+
+// WriteServiceError is a convenience method for responding with a status and a ServiceError
+func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
+ r.err = err
+ return r.WriteHeaderAndEntity(httpStatus, err)
+}
+
+// WriteErrorString is a convenience method for an error status with the actual error
+func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
+ if r.err == nil {
+ // if not called from WriteError
+ r.err = errors.New(errorReason)
+ }
+ r.WriteHeader(httpStatus)
+ if _, err := r.Write([]byte(errorReason)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Flush implements http.Flusher interface, which sends any buffered data to the client.
+func (r *Response) Flush() {
+ if f, ok := r.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ } else if trace {
+ traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
+ }
+}
+
+// WriteHeader is overridden to remember the Status Code that has been written.
+// Changes to the Header of the response have no effect after this.
+func (r *Response) WriteHeader(httpStatus int) {
+ r.statusCode = httpStatus
+ r.ResponseWriter.WriteHeader(httpStatus)
+}
+
+// StatusCode returns the code that has been written using WriteHeader.
+func (r Response) StatusCode() int {
+ if 0 == r.statusCode {
+ // no status code has been written yet; assume OK
+ return http.StatusOK
+ }
+ return r.statusCode
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// Write is part of http.ResponseWriter interface.
+func (r *Response) Write(bytes []byte) (int, error) {
+ written, err := r.ResponseWriter.Write(bytes)
+ r.contentLength += written
+ return written, err
+}
+
+// ContentLength returns the number of bytes written for the response content.
+// Note that this value is only correct if all data is written through the Response using its Write* methods.
+// Data written directly using the underlying http.ResponseWriter is not accounted for.
+func (r Response) ContentLength() int {
+ return r.contentLength
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (r Response) CloseNotify() <-chan bool {
+ return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Error returns the err created by WriteError
+func (r Response) Error() error {
+ return r.err
+}
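
A sketch of configuring the fallbacks that EntityWriter consults, so responses still serialize when Accept matching fails:

    // fall back to JSON whenever no registered writer matches the Accept header
    restful.DefaultResponseContentType(restful.MIME_JSON)
    // and switch off indentation for compact payloads
    restful.PrettyPrintResponses = false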
diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go
new file mode 100644
index 000000000..3dd520eec
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/route.go
@@ -0,0 +1,186 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "net/http"
+ "strings"
+)
+
+// RouteFunction declares the signature of a function that can be bound to a Route.
+type RouteFunction func(*Request, *Response)
+
+// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
+type Route struct {
+ Method string
+ Produces []string
+ Consumes []string
+ Path string // webservice root path + described path
+ Function RouteFunction
+ Filters []FilterFunction
+
+ // cached values for dispatching
+ relativePath string
+ pathParts []string
+ pathExpr *pathExpression // cached compilation of relativePath as RegExp
+
+ // documentation
+ Doc string
+ Notes string
+ Operation string
+ ParameterDocs []*Parameter
+ ResponseErrors map[int]ResponseError
+ ReadSample, WriteSample interface{} // structs that model an example request or response payload
+
+ // Extra information used to store custom information about the route.
+ Metadata map[string]interface{}
+}
+
+// Initialize for Route
+func (r *Route) postBuild() {
+ r.pathParts = tokenizePath(r.Path)
+}
+
+// Create Request and Response from their http versions
+func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+ params := r.extractParameters(httpRequest.URL.Path)
+ wrappedRequest := NewRequest(httpRequest)
+ wrappedRequest.pathParameters = params
+ wrappedRequest.selectedRoutePath = r.Path
+ wrappedResponse := NewResponse(httpWriter)
+ wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ wrappedResponse.routeProduces = r.Produces
+ return wrappedRequest, wrappedResponse
+}
+
+// dispatchWithFilters calls the function after passing through its own filters
+func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
+ if len(r.Filters) > 0 {
+ chain := FilterChain{Filters: r.Filters, Target: r.Function}
+ chain.ProcessFilter(wrappedRequest, wrappedResponse)
+ } else {
+ // unfiltered
+ r.Function(wrappedRequest, wrappedResponse)
+ }
+}
+
+// Return whether the mimeType matches what this Route can produce.
+func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
+ parts := strings.Split(mimeTypesWithQuality, ",")
+ for _, each := range parts {
+ var withoutQuality string
+ if strings.Contains(each, ";") {
+ withoutQuality = strings.Split(each, ";")[0]
+ } else {
+ withoutQuality = each
+ }
+ // trim before compare
+ withoutQuality = strings.Trim(withoutQuality, " ")
+ if withoutQuality == "*/*" {
+ return true
+ }
+ for _, producibleType := range r.Produces {
+ if producibleType == "*/*" || producibleType == withoutQuality {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+func (r Route) matchesContentType(mimeTypes string) bool {
+
+ if len(r.Consumes) == 0 {
+ // did not specify what it can consume ; any media type ("*/*") is assumed
+ return true
+ }
+
+ if len(mimeTypes) == 0 {
+ // idempotent methods with (most likely or guaranteed) empty content match a missing Content-Type
+ m := r.Method
+ if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
+ return true
+ }
+ // proceed with default
+ mimeTypes = MIME_OCTET
+ }
+
+ parts := strings.Split(mimeTypes, ",")
+ for _, each := range parts {
+ var contentType string
+ if strings.Contains(each, ";") {
+ contentType = strings.Split(each, ";")[0]
+ } else {
+ contentType = each
+ }
+ // trim before compare
+ contentType = strings.Trim(contentType, " ")
+ for _, consumeableType := range r.Consumes {
+ if consumeableType == "*/*" || consumeableType == contentType {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Extract the parameters from the request url path
+func (r Route) extractParameters(urlPath string) map[string]string {
+ urlParts := tokenizePath(urlPath)
+ pathParameters := map[string]string{}
+ for i, key := range r.pathParts {
+ var value string
+ if i >= len(urlParts) {
+ value = ""
+ } else {
+ value = urlParts[i]
+ }
+ if strings.HasPrefix(key, "{") { // path-parameter
+ if colon := strings.Index(key, ":"); colon != -1 {
+ // extract by regex
+ regPart := key[colon+1 : len(key)-1]
+ keyPart := key[1:colon]
+ if regPart == "*" {
+ pathParameters[keyPart] = untokenizePath(i, urlParts)
+ break
+ } else {
+ pathParameters[keyPart] = value
+ }
+ } else {
+ // without enclosing {}
+ pathParameters[key[1:len(key)-1]] = value
+ }
+ }
+ }
+ return pathParameters
+}
+
+// Untokenize back into a URL path using the slash separator
+func untokenizePath(offset int, parts []string) string {
+ var buffer bytes.Buffer
+ for p := offset; p < len(parts); p++ {
+ buffer.WriteString(parts[p])
+ // do not append a trailing slash
+ if p < len(parts)-1 {
+ buffer.WriteString("/")
+ }
+ }
+ return buffer.String()
+}
+
+// Tokenize a URL path using the slash separator ; the result does not have empty tokens
+func tokenizePath(path string) []string {
+ if "/" == path {
+ return []string{}
+ }
+ return strings.Split(strings.Trim(path, "/"), "/")
+}
+
+// for debugging
+func (r Route) String() string {
+ return r.Method + " " + r.Path
+}
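
As a worked example: for a Route with Path /meetings/{id}/attendees and request path /meetings/42/attendees, extractParameters yields map[id:42]. A standalone sketch of the plain-variable branch (the {name:regexp} cases are omitted):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // the same tokenization that tokenizePath performs
        routeParts := strings.Split(strings.Trim("/meetings/{id}/attendees", "/"), "/")
        urlParts := strings.Split(strings.Trim("/meetings/42/attendees", "/"), "/")
        params := map[string]string{}
        for i, key := range routeParts {
            if strings.HasPrefix(key, "{") && i < len(urlParts) {
                params[key[1:len(key)-1]] = urlParts[i] // strip the enclosing {}
            }
        }
        fmt.Println(params) // map[id:42]
    }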
diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go
new file mode 100644
index 000000000..5ad4a3a7c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/route_builder.go
@@ -0,0 +1,293 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync/atomic"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// RouteBuilder is a helper to construct Routes.
+type RouteBuilder struct {
+ rootPath string
+ currentPath string
+ produces []string
+ consumes []string
+ httpMethod string // required
+ function RouteFunction // required
+ filters []FilterFunction
+
+ typeNameHandleFunc TypeNameHandleFunction // required
+
+ // documentation
+ doc string
+ notes string
+ operation string
+ readSample, writeSample interface{}
+ parameters []*Parameter
+ errorMap map[int]ResponseError
+ metadata map[string]interface{}
+}
+
+// Do evaluates each argument with the RouteBuilder itself.
+// This allows you to follow DRY principles without breaking the fluent programming style.
+// Example:
+// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
+//
+// func Returns500(b *RouteBuilder) {
+// b.Returns(500, "Internal Server Error", restful.ServiceError{})
+// }
+func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
+ for _, each := range oneArgBlocks {
+ each(b)
+ }
+ return b
+}
+
+// To binds the route to a function.
+// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
+func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
+ b.function = function
+ return b
+}
+
+// Method specifies what HTTP method to match. Required.
+func (b *RouteBuilder) Method(method string) *RouteBuilder {
+ b.httpMethod = method
+ return b
+}
+
+// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
+func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
+ b.produces = mimeTypes
+ return b
+}
+
+// Consumes specifies what MIME types can be consumed ; the Content-Type Http header must match any of these
+func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
+ b.consumes = mimeTypes
+ return b
+}
+
+// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
+func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
+ b.currentPath = subPath
+ return b
+}
+
+// Doc tells what this route is all about. Optional.
+func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
+ b.doc = documentation
+ return b
+}
+
+// Notes sets a verbose explanation of the operation behavior. Optional.
+func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
+ b.notes = notes
+ return b
+}
+
+// Reads tells what resource type will be read from the request payload. Optional.
+// A parameter of type "body" is added, required is set to true, and the dataType is set to the qualified name of the sample's type.
+func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
+ fn := b.typeNameHandleFunc
+ if fn == nil {
+ fn = reflectTypeName
+ }
+ typeAsName := fn(sample)
+
+ b.readSample = sample
+ bodyParameter := &Parameter{&ParameterData{Name: "body"}}
+ bodyParameter.beBody()
+ bodyParameter.Required(true)
+ bodyParameter.DataType(typeAsName)
+ b.Param(bodyParameter)
+ return b
+}
+
+// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if no such Parameter exists.
+// Use this to modify or extend information for the Parameter (through its Data()).
+func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
+ for _, each := range b.parameters {
+ if each.Data().Name == name {
+ return each
+ }
+ }
+ return p
+}
+
+// Writes tells what resource type will be written as the response payload. Optional.
+func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
+ b.writeSample = sample
+ return b
+}
+
+// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
+func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
+ if b.parameters == nil {
+ b.parameters = []*Parameter{}
+ }
+ b.parameters = append(b.parameters, parameter)
+ return b
+}
+
+// Operation allows you to document what the actual method/function call is of the Route.
+// Unless called, the operation name is derived from the RouteFunction set using To(..).
+func (b *RouteBuilder) Operation(name string) *RouteBuilder {
+ b.operation = name
+ return b
+}
+
+// ReturnsError is deprecated, use Returns instead.
+func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
+ log.Print("ReturnsError is deprecated, use Returns instead.")
+ return b.Returns(code, message, model)
+}
+
+// Returns allows you to document what responses (errors or regular) can be expected.
+// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
+func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
+ err := ResponseError{
+ Code: code,
+ Message: message,
+ Model: model,
+ IsDefault: false,
+ }
+ // lazy init because there is no NewRouteBuilder (yet)
+ if b.errorMap == nil {
+ b.errorMap = map[int]ResponseError{}
+ }
+ b.errorMap[code] = err
+ return b
+}
+
+// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
+func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
+ b.Returns(0, message, model)
+ // Modify the ResponseError just added/updated
+ re := b.errorMap[0]
+ // errorMap is initialized
+ b.errorMap[0] = ResponseError{
+ Code: re.Code,
+ Message: re.Message,
+ Model: re.Model,
+ IsDefault: true,
+ }
+ return b
+}
+
+// Metadata adds or updates a key=value pair to the metadata map.
+func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
+ if b.metadata == nil {
+ b.metadata = map[string]interface{}{}
+ }
+ b.metadata[key] = value
+ return b
+}
+
+// ResponseError represents a response; not necessarily an error.
+type ResponseError struct {
+ Code int
+ Message string
+ Model interface{}
+ IsDefault bool
+}
+
+func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
+ b.rootPath = path
+ return b
+}
+
+// Filter appends a FilterFunction to the end of filters for this Route to build.
+func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
+ b.filters = append(b.filters, filter)
+ return b
+}
+
+// copyDefaults fills in the WebService-level defaults:
+// If no specific Produces then set to rootProduces
+// If no specific Consumes then set to rootConsumes
+func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
+ if len(b.produces) == 0 {
+ b.produces = rootProduces
+ }
+ if len(b.consumes) == 0 {
+ b.consumes = rootConsumes
+ }
+}
+
+// typeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions.
+func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
+ b.typeNameHandleFunc = handler
+ return b
+}
+
+// Build creates a new Route using the specification details collected by the RouteBuilder
+func (b *RouteBuilder) Build() Route {
+ pathExpr, err := newPathExpression(b.currentPath)
+ if err != nil {
+ log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
+ os.Exit(1)
+ }
+ if b.function == nil {
+ log.Printf("[restful] No function specified for route:" + b.currentPath)
+ os.Exit(1)
+ }
+ operationName := b.operation
+ if len(operationName) == 0 && b.function != nil {
+ // extract from definition
+ operationName = nameOfFunction(b.function)
+ }
+ route := Route{
+ Method: b.httpMethod,
+ Path: concatPath(b.rootPath, b.currentPath),
+ Produces: b.produces,
+ Consumes: b.consumes,
+ Function: b.function,
+ Filters: b.filters,
+ relativePath: b.currentPath,
+ pathExpr: pathExpr,
+ Doc: b.doc,
+ Notes: b.notes,
+ Operation: operationName,
+ ParameterDocs: b.parameters,
+ ResponseErrors: b.errorMap,
+ ReadSample: b.readSample,
+ WriteSample: b.writeSample,
+ Metadata: b.metadata}
+ route.postBuild()
+ return route
+}
+
+func concatPath(path1, path2 string) string {
+ return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
+}
+
+var anonymousFuncCount int32
+
+// nameOfFunction returns the short name of the function f for documentation.
+// It uses a runtime feature for debugging ; its value may change for later Go versions.
+func nameOfFunction(f interface{}) string {
+ fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
+ tokenized := strings.Split(fun.Name(), ".")
+ last := tokenized[len(tokenized)-1]
+ last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, ")-fm") // Go 1.5
+ last = strings.TrimSuffix(last, "·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, "-fm") // Go 1.5
+ if last == "func1" { // this could mean conflicts in API docs
+ val := atomic.AddInt32(&anonymousFuncCount, 1)
+ last = "func" + fmt.Sprintf("%d", val)
+ atomic.StoreInt32(&anonymousFuncCount, val)
+ }
+ return last
+}
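
A sketch of the fluent style the builder enables; user, getUser and newUserService are hypothetical names, and the snippet assumes the restful import:

    type user struct{ Name string }

    func getUser(req *restful.Request, resp *restful.Response) { /* hypothetical handler */ }

    func newUserService() *restful.WebService {
        ws := new(restful.WebService)
        ws.Path("/users").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
        ws.Route(ws.GET("/{id}").To(getUser).
            Doc("fetch a user by its identifier").
            Param(ws.PathParameter("id", "identifier of the user")).
            Writes(user{}). // response payload sample, for documentation
            Returns(200, "OK", user{}))
        return ws
    }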
diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go
new file mode 100644
index 000000000..9b32fb675
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/router.go
@@ -0,0 +1,18 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "net/http"
+
+// A RouteSelector finds the best matching Route given the input HTTP Request
+type RouteSelector interface {
+
+ // SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
+ // It returns a selected Route and its containing WebService or an error indicating
+ // a problem.
+ SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
+}
diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go
new file mode 100644
index 000000000..62d1108bb
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/service_error.go
@@ -0,0 +1,23 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "fmt"
+
+// ServiceError is a transport object to pass information about a non-Http error that occurred in a WebService while processing a request.
+type ServiceError struct {
+ Code int
+ Message string
+}
+
+// NewError returns a ServiceError using the code and reason
+func NewError(code int, message string) ServiceError {
+ return ServiceError{Code: code, Message: message}
+}
+
+// Error returns a text representation of the service error
+func (s ServiceError) Error() string {
+ return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
+}
diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go
new file mode 100644
index 000000000..7af60233a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/web_service.go
@@ -0,0 +1,290 @@
+package restful
+
+import (
+ "errors"
+ "os"
+ "reflect"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
+type WebService struct {
+ rootPath string
+ pathExpr *pathExpression // cached compilation of rootPath as RegExp
+ routes []Route
+ produces []string
+ consumes []string
+ pathParameters []*Parameter
+ filters []FilterFunction
+ documentation string
+ apiVersion string
+
+ typeNameHandleFunc TypeNameHandleFunction
+
+ dynamicRoutes bool
+
+ // protects 'routes' if dynamic routes are enabled
+ routesLock sync.RWMutex
+}
+
+func (w *WebService) SetDynamicRoutes(enable bool) {
+ w.dynamicRoutes = enable
+}
+
+// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
+// into the restful documentation for the service.
+type TypeNameHandleFunction func(sample interface{}) string
+
+// TypeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions. If not set, the web service will invoke
+// reflect.TypeOf(object).String().
+func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
+ w.typeNameHandleFunc = handler
+ return w
+}
+
+// reflectTypeName is the default TypeNameHandleFunction and for a given object
+// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
+// the reflection API.
+func reflectTypeName(sample interface{}) string {
+ return reflect.TypeOf(sample).String()
+}
+
+// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
+func (w *WebService) compilePathExpression() {
+ compiled, err := newPathExpression(w.rootPath)
+ if err != nil {
+ log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
+ os.Exit(1)
+ }
+ w.pathExpr = compiled
+}
+
+// ApiVersion sets the API version for documentation purposes.
+func (w *WebService) ApiVersion(apiVersion string) *WebService {
+ w.apiVersion = apiVersion
+ return w
+}
+
+// Version returns the API version for documentation purposes.
+func (w *WebService) Version() string { return w.apiVersion }
+
+// Path specifies the root URL template path of the WebService.
+// All Routes will be relative to this path.
+func (w *WebService) Path(root string) *WebService {
+ w.rootPath = root
+ if len(w.rootPath) == 0 {
+ w.rootPath = "/"
+ }
+ w.compilePathExpression()
+ return w
+}
+
+// Param adds a PathParameter to document parameters used in the root path.
+func (w *WebService) Param(parameter *Parameter) *WebService {
+ if w.pathParameters == nil {
+ w.pathParameters = []*Parameter{}
+ }
+ w.pathParameters = append(w.pathParameters, parameter)
+ return w
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) PathParameter(name, description string) *Parameter {
+ return PathParameter(name, description)
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func PathParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
+ p.bePath()
+ return p
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) QueryParameter(name, description string) *Parameter {
+ return QueryParameter(name, description)
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func QueryParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beQuery()
+ return p
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func (w *WebService) BodyParameter(name, description string) *Parameter {
+ return BodyParameter(name, description)
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func BodyParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
+ p.beBody()
+ return p
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) HeaderParameter(name, description string) *Parameter {
+ return HeaderParameter(name, description)
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func HeaderParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beHeader()
+ return p
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) FormParameter(name, description string) *Parameter {
+ return FormParameter(name, description)
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func FormParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beForm()
+ return p
+}
+
+// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes.
+func (w *WebService) Route(builder *RouteBuilder) *WebService {
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+ builder.copyDefaults(w.produces, w.consumes)
+ w.routes = append(w.routes, builder.Build())
+ return w
+}
+
+// RemoveRoute removes the route that matches the given 'path' and 'method'.
+func (w *WebService) RemoveRoute(path, method string) error {
+ if !w.dynamicRoutes {
+ return errors.New("dynamic routes are not enabled")
+ }
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+ // rebuild the list without the matching route; using append avoids
+ // an index panic when no route matches path and method
+ newRoutes := make([]Route, 0, len(w.routes))
+ for ix := range w.routes {
+ if w.routes[ix].Method == method && w.routes[ix].Path == path {
+ continue
+ }
+ newRoutes = append(newRoutes, w.routes[ix])
+ }
+ w.routes = newRoutes
+ return nil
+}
+
+// Method creates a new RouteBuilder and initializes its http method
+func (w *WebService) Method(httpMethod string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
+}
+
+// Produces specifies that this WebService can produce one or more MIME types.
+// Http requests must have one of these values set for the Accept header.
+func (w *WebService) Produces(contentTypes ...string) *WebService {
+ w.produces = contentTypes
+ return w
+}
+
+// Consumes specifies that this WebService can consume one or more MIME types.
+// Http requests must have one of these values set for the Content-Type header.
+func (w *WebService) Consumes(accepts ...string) *WebService {
+ w.consumes = accepts
+ return w
+}
+
+// Routes returns the Routes associated with this WebService
+func (w *WebService) Routes() []Route {
+ if !w.dynamicRoutes {
+ return w.routes
+ }
+	// Make a copy of the slice to prevent concurrency problems
+ w.routesLock.RLock()
+ defer w.routesLock.RUnlock()
+ result := make([]Route, len(w.routes))
+ for ix := range w.routes {
+ result[ix] = w.routes[ix]
+ }
+ return result
+}
+
+// RootPath returns the RootPath associated with this WebService (default "/").
+func (w *WebService) RootPath() string {
+ return w.rootPath
+}
+
+// PathParameters returns the path parameters shared among all Routes of this WebService
+func (w *WebService) PathParameters() []*Parameter {
+ return w.pathParameters
+}
+
+// Filter adds a filter function to the chain of filters applicable to all its Routes
+func (w *WebService) Filter(filter FilterFunction) *WebService {
+ w.filters = append(w.filters, filter)
+ return w
+}
+
+// Doc is used to set the documentation of this service.
+func (w *WebService) Doc(plainText string) *WebService {
+ w.documentation = plainText
+ return w
+}
+
+// Documentation returns the documentation of this service.
+func (w *WebService) Documentation() string {
+ return w.documentation
+}
+
+/*
+ Convenience methods
+*/
+
+// HEAD is a shortcut for .Method("HEAD").Path(subPath)
+func (w *WebService) HEAD(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
+}
+
+// GET is a shortcut for .Method("GET").Path(subPath)
+func (w *WebService) GET(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
+}
+
+// POST is a shortcut for .Method("POST").Path(subPath)
+func (w *WebService) POST(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
+}
+
+// PUT is a shortcut for .Method("PUT").Path(subPath)
+func (w *WebService) PUT(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
+}
+
+// PATCH is a shortcut for .Method("PATCH").Path(subPath)
+func (w *WebService) PATCH(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
+}
+
+// DELETE is a shortcut for .Method("DELETE").Path(subPath)
+func (w *WebService) DELETE(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
+}
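+
+// Example (illustrative sketch, not part of the original source; assumes a
+// handler func findUser(req *Request, resp *Response) exists): a service with
+// a path parameter and a GET route can be assembled as
+//
+//	ws := new(WebService)
+//	ws.Path("/users").Consumes(MIME_JSON).Produces(MIME_JSON)
+//	ws.Route(ws.GET("/{id}").
+//		To(findUser).
+//		Param(ws.PathParameter("id", "identifier of the user")))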
diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go
new file mode 100644
index 000000000..c9d31b06c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/web_service_container.go
@@ -0,0 +1,39 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+)
+
+// DefaultContainer is a restful.Container that uses http.DefaultServeMux
+var DefaultContainer *Container
+
+func init() {
+ DefaultContainer = NewContainer()
+ DefaultContainer.ServeMux = http.DefaultServeMux
+}
+
+// DoNotRecover, when set to true, causes panics not to be caught, so they are not turned into HTTP 500 responses.
+// In that case, Route functions are responsible for handling any error situation.
+// The default value is false (recover from panics), which has performance implications.
+// OBSOLETE; use restful.DefaultContainer.DoNotRecover(true) instead.
+var DoNotRecover = false
+
+// Add registers a new WebService and adds it to the DefaultContainer.
+func Add(service *WebService) {
+ DefaultContainer.Add(service)
+}
+
+// Filter appends a container FilterFunction to the DefaultContainer.
+// These are called before dispatching a http.Request to a WebService.
+func Filter(filter FilterFunction) {
+ DefaultContainer.Filter(filter)
+}
+
+// RegisteredWebServices returns the collection of WebServices registered on the DefaultContainer
+func RegisteredWebServices() []*WebService {
+ return DefaultContainer.RegisteredWebServices()
+}
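+
+// Example (illustrative sketch, not part of the original source): from a client
+// package, a WebService ws can be registered on the DefaultContainer and served
+// via http.DefaultServeMux:
+//
+//	restful.Add(ws)
+//	log.Fatal(http.ListenAndServe(":8080", nil))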
diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE
new file mode 100644
index 000000000..541977250
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Exponent Labs LLC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md
new file mode 100644
index 000000000..382fb3138
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/README.md
@@ -0,0 +1,66 @@
+[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath)
+[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath)
+
+# jsonpath
+
+This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used.
+
+This Decoder has the following enhancements:
+ * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
+ * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
+ * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
+ * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
+
+## Installation
+
+ go get -u github.com/exponent-io/jsonpath
+
+## Example Usage
+
+#### SeekTo
+
+```go
+import (
+	"bytes"
+
+	"github.com/exponent-io/jsonpath"
+)
+
+var j = []byte(`[
+	{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
+	{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
+]`)
+
+w := jsonpath.NewDecoder(bytes.NewReader(j))
+var v interface{}
+
+w.SeekTo(1, "Point", "G")
+w.Decode(&v) // v is 218
+```
+
+#### Scan with PathActions
+
+```go
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/exponent-io/jsonpath"
+)
+
+var j = []byte(`{"colors":[
+	{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
+	{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
+]}`)
+
+var actions jsonpath.PathActions
+
+// Extract the value at Point.A
+actions.Add(func(d *jsonpath.Decoder) error {
+	var alpha int
+	err := d.Decode(&alpha)
+	fmt.Printf("Alpha: %v\n", alpha)
+	return err
+}, "Point", "A")
+
+w := jsonpath.NewDecoder(bytes.NewReader(j))
+w.SeekTo("colors", 0)
+
+var ok = true
+var err error
+for ok {
+	ok, err = w.Scan(&actions)
+	if err != nil && err != io.EOF {
+		panic(err)
+	}
+}
+```
diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go
new file mode 100644
index 000000000..31de46c73
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/decoder.go
@@ -0,0 +1,210 @@
+package jsonpath
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
+type KeyString string
+
+// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
+type Decoder struct {
+ json.Decoder
+
+ path JsonPath
+ context jsonContext
+}
+
+// NewDecoder creates a new instance of the extended JSON Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{Decoder: *json.NewDecoder(r)}
+}
+
+// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
+//
+// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+//
+// Consider the JSON structure
+//
+// { "a": [0,"s",12e4,{"b":0,"v":35} ] }
+//
+// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
+// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
+// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
+//
+// SeekTo returns a boolean value indicating whether a match was found.
+//
+// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
+func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
+
+ if len(path) == 0 {
+ return len(d.path) == 0, nil
+ }
+ last := len(path) - 1
+ if i, ok := path[last].(int); ok {
+ path[last] = i - 1
+ }
+
+ for {
+ if d.path.Equal(path) {
+ return true, nil
+ }
+ _, err := d.Token()
+ if err == io.EOF {
+ return false, nil
+ } else if err != nil {
+ return false, err
+ }
+ }
+}
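+
+// Illustrative sketch (not part of the original source): seeking to and
+// decoding the value from the example in the doc comment above, assuming a
+// strings import:
+//
+//	d := NewDecoder(strings.NewReader(`{ "a": [0,"s",12e4,{"b":0,"v":35} ] }`))
+//	if ok, _ := d.SeekTo("a", 3, "v"); ok {
+//		var v int
+//		d.Decode(&v) // v == 35
+//	}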
+
+// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
+// equivalent to encoding/json.Decoder.Decode().
+func (d *Decoder) Decode(v interface{}) error {
+	switch d.context {
+	case objValue:
+		d.context = objKey
+	case arrValue:
+		d.path.incTop()
+	}
+	return d.Decoder.Decode(v)
+}
+
+// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
+// position of the most-recently parsed token.
+func (d *Decoder) Path() JsonPath {
+ p := make(JsonPath, len(d.path))
+ copy(p, d.path)
+ return p
+}
+
+// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
+// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
+// KeyString rather than as a native string.
+func (d *Decoder) Token() (json.Token, error) {
+	t, err := d.Decoder.Token()
+	if err != nil {
+		return t, err
+	}
+
+	if t == nil {
+		switch d.context {
+		case objValue:
+			d.context = objKey
+		case arrValue:
+			d.path.incTop()
+		}
+		return t, err
+	}
+
+	switch t := t.(type) {
+	case json.Delim:
+		switch t {
+		case json.Delim('{'):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push("")
+			d.context = objKey
+		case json.Delim('}'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+		case json.Delim('['):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push(-1)
+			d.context = arrValue
+		case json.Delim(']'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+		}
+	case float64, json.Number, bool:
+		switch d.context {
+		case objValue:
+			d.context = objKey
+		case arrValue:
+			d.path.incTop()
+		}
+	case string:
+		switch d.context {
+		case objKey:
+			d.path.nameTop(t)
+			d.context = objValue
+			return KeyString(t), err
+		case objValue:
+			d.context = objKey
+		case arrValue:
+			d.path.incTop()
+		}
+	}
+
+	return t, err
+}
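+
+// Illustrative sketch (not part of the original source): for the input
+// `{"a":1}`, successive Token() calls yield json.Delim('{'), KeyString("a"),
+// float64(1), and json.Delim('}').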
+
+// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
+// invoking each matching PathAction along the way.
+//
+// Scan returns true if there are more contiguous values to scan (for example in an array).
+func (d *Decoder) Scan(ext *PathActions) (bool, error) {
+
+ rootPath := d.Path()
+
+ // If this is an array path, increment the root path in our local copy.
+ if rootPath.inferContext() == arrValue {
+ rootPath.incTop()
+ }
+
+ for {
+ // advance the token position
+ _, err := d.Token()
+ if err != nil {
+ return false, err
+ }
+
+ match:
+ var relPath JsonPath
+
+ // capture the new JSON path
+ path := d.Path()
+
+ if len(path) > len(rootPath) {
+ // capture the path relative to where the scan started
+ relPath = path[len(rootPath):]
+ } else {
+ // if the path is not longer than the root, then we are done with this scan
+ // return boolean flag indicating if there are more items to scan at the same level
+ return d.Decoder.More(), nil
+ }
+
+ // match the relative path against the path actions
+ if node := ext.node.match(relPath); node != nil {
+ if node.action != nil {
+ // we have a match so execute the action
+ err = node.action(d)
+ if err != nil {
+ return d.Decoder.More(), err
+ }
+ // The action may have advanced the decoder. If we are in an array, advancing it further would
+ // skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
+ if d.path.inferContext() == arrValue && d.Decoder.More() {
+ goto match
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go
new file mode 100644
index 000000000..d7db2ad33
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/path.go
@@ -0,0 +1,67 @@
+// Package jsonpath extends the Go runtime's json.Decoder, enabling navigation of a stream of JSON tokens.
+package jsonpath
+
+import "fmt"
+
+type jsonContext int
+
+const (
+ none jsonContext = iota
+ objKey
+ objValue
+ arrValue
+)
+
+// AnyIndex can be used in a pattern to match any array index.
+const AnyIndex = -2
+
+// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
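+// For example (illustrative, not part of the original source), in the document
+// {"a":[{"b":1}]} the value 1 is addressed by JsonPath{"a", 0, "b"}.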
+type JsonPath []interface{}
+
+func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
+func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] }
+
+// increment the index at the top of the stack (must be an array index)
+func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
+
+// name the key at the top of the stack (must be an object key)
+func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
+
+// infer the context from the item at the top of the stack
+func (p *JsonPath) inferContext() jsonContext {
+ if len(*p) == 0 {
+ return none
+ }
+ t := (*p)[len(*p)-1]
+ switch t.(type) {
+ case string:
+ return objKey
+ case int:
+ return arrValue
+ default:
+ panic(fmt.Sprintf("Invalid stack type %T", t))
+ }
+}
+
+// Equal tests for equality between two JsonPath types.
+func (p *JsonPath) Equal(o JsonPath) bool {
+ if len(*p) != len(o) {
+ return false
+ }
+ for i, v := range *p {
+ if v != o[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// HasPrefix reports whether the path starts with the (possibly shorter) path o.
+func (p *JsonPath) HasPrefix(o JsonPath) bool {
+	if len(o) > len(*p) {
+		return false
+	}
+	for i, v := range o {
+		if v != (*p)[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go
new file mode 100644
index 000000000..497ed686c
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/pathaction.go
@@ -0,0 +1,61 @@
+package jsonpath
+
+// pathNode is used to construct a trie of paths to be matched
+type pathNode struct {
+ matchOn interface{} // string, or integer
+ childNodes []pathNode
+ action DecodeAction
+}
+
+// match climbs the trie to find a node that matches the given JSON path.
+func (n *pathNode) match(path JsonPath) *pathNode {
+ var node *pathNode = n
+ for _, ps := range path {
+ found := false
+ for i, n := range node.childNodes {
+ if n.matchOn == ps {
+ node = &node.childNodes[i]
+ found = true
+ break
+ } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
+ node = &node.childNodes[i]
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil
+ }
+ }
+ return node
+}
+
+// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
+// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
+type PathActions struct {
+ node pathNode
+}
+
+// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
+type DecodeAction func(d *Decoder) error
+
+// Add specifies an action to call on the Decoder when the specified path is encountered.
+func (je *PathActions) Add(action DecodeAction, path ...interface{}) {
+
+ var node *pathNode = &je.node
+ for _, ps := range path {
+ found := false
+ for i, n := range node.childNodes {
+ if n.matchOn == ps {
+ node = &node.childNodes[i]
+ found = true
+ break
+ }
+ }
+ if !found {
+ node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
+ node = &node.childNodes[len(node.childNodes)-1]
+ }
+ }
+ node.action = action
+}
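+
+// Illustrative sketch (not part of the original source): register an action for
+// the "A" field of every element of a nested "Points" array, using AnyIndex as
+// a wildcard array index:
+//
+//	var actions PathActions
+//	actions.Add(func(d *Decoder) error {
+//		var a int
+//		return d.Decode(&a)
+//	}, "Points", AnyIndex, "A")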
diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md
new file mode 100644
index 000000000..aa4a536ca
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md
new file mode 100644
index 000000000..105a6ae33
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/README.md
@@ -0,0 +1,58 @@
+# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase)
+
+CamelCase is a Go package that splits the words of a camelcase string into a
+slice of words. It can be used to convert a camelcase word (lower or upper
+case) into any other word style.
+
+## Splitting rules
+
+1. If the string is not valid UTF-8, return it without splitting as a
+   single-item array.
+2. Assign each Unicode character to one of 4 sets: lower case
+   letters, upper case letters, numbers, and all other characters.
+3. Iterate through the characters of the string, introducing splits
+   between adjacent characters that belong to different sets.
+4. Iterate through the array of split strings, and if a given string
+   is upper case:
+    * if the subsequent string is lower case:
+      * move the last character of the upper case string to the beginning of
+        the lower case string
+
+## Install
+
+```bash
+go get github.com/fatih/camelcase
+```
+
+## Usage and examples
+
+```go
+splitted := camelcase.Split("GolangPackage")
+
+fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package"
+```
+
+Both lower camel case and upper camel case are supported. For more info please
+check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase)
+
+Below are some example cases:
+
+```
+"" => []
+"lowercase" => ["lowercase"]
+"Class" => ["Class"]
+"MyClass" => ["My", "Class"]
+"MyC" => ["My", "C"]
+"HTML" => ["HTML"]
+"PDFLoader" => ["PDF", "Loader"]
+"AString" => ["A", "String"]
+"SimpleXMLParser" => ["Simple", "XML", "Parser"]
+"vimRPCPlugin" => ["vim", "RPC", "Plugin"]
+"GL11Version" => ["GL", "11", "Version"]
+"99Bottles" => ["99", "Bottles"]
+"May5" => ["May", "5"]
+"BFG9000" => ["BFG", "9000"]
+"BöseÜberraschung" => ["Böse", "Überraschung"]
+"Two spaces" => ["Two", " ", "spaces"]
+"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
+```
diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go
new file mode 100644
index 000000000..02160c9a4
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/camelcase.go
@@ -0,0 +1,90 @@
+// Package camelcase is a micro package to split the words of a camelcase type
+// string into a slice of words.
+package camelcase
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+// Split splits the camelcase word and returns a list of words. It also
+// supports digits. Both lower camel case and upper camel case are supported.
+// For more info please check: http://en.wikipedia.org/wiki/CamelCase
+//
+// Examples
+//
+// "" => [""]
+// "lowercase" => ["lowercase"]
+// "Class" => ["Class"]
+// "MyClass" => ["My", "Class"]
+// "MyC" => ["My", "C"]
+// "HTML" => ["HTML"]
+// "PDFLoader" => ["PDF", "Loader"]
+// "AString" => ["A", "String"]
+// "SimpleXMLParser" => ["Simple", "XML", "Parser"]
+// "vimRPCPlugin" => ["vim", "RPC", "Plugin"]
+// "GL11Version" => ["GL", "11", "Version"]
+// "99Bottles" => ["99", "Bottles"]
+// "May5" => ["May", "5"]
+// "BFG9000" => ["BFG", "9000"]
+// "BöseÜberraschung" => ["Böse", "Überraschung"]
+// "Two spaces" => ["Two", " ", "spaces"]
+// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
+//
+// Splitting rules
+//
+//  1) If the string is not valid UTF-8, return it without splitting as a
+//     single-item array.
+//  2) Assign each Unicode character to one of 4 sets: lower case
+//     letters, upper case letters, numbers, and all other characters.
+//  3) Iterate through the characters of the string, introducing splits
+//     between adjacent characters that belong to different sets.
+//  4) Iterate through the array of split strings, and if a given string
+//     is upper case:
+//       if the subsequent string is lower case:
+//         move the last character of the upper case string to the beginning of
+//         the lower case string
+func Split(src string) (entries []string) {
+ // don't split invalid utf8
+ if !utf8.ValidString(src) {
+ return []string{src}
+ }
+ entries = []string{}
+ var runes [][]rune
+ lastClass := 0
+ class := 0
+ // split into fields based on class of unicode character
+ for _, r := range src {
+ switch true {
+ case unicode.IsLower(r):
+ class = 1
+ case unicode.IsUpper(r):
+ class = 2
+ case unicode.IsDigit(r):
+ class = 3
+ default:
+ class = 4
+ }
+ if class == lastClass {
+ runes[len(runes)-1] = append(runes[len(runes)-1], r)
+ } else {
+ runes = append(runes, []rune{r})
+ }
+ lastClass = class
+ }
+ // handle upper case -> lower case sequences, e.g.
+ // "PDFL", "oader" -> "PDF", "Loader"
+ for i := 0; i < len(runes)-1; i++ {
+ if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) {
+ runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...)
+ runes[i] = runes[i][:len(runes[i])-1]
+ }
+ }
+ // construct []string from results
+ for _, s := range runes {
+ if len(s) > 0 {
+ entries = append(entries, string(s))
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 000000000..f21e54080
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 000000000..399320741
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,79 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter |OS |Status |
+|----------|----------|----------|
+|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify |Linux 2.6.37+ | |
+|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
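+
+A minimal usage sketch (illustrative, not from the repository's example file;
+assumes the `log` package is imported and that `/tmp` exists):
+
+```go
+watcher, err := fsnotify.NewWatcher()
+if err != nil {
+	log.Fatal(err)
+}
+defer watcher.Close()
+
+go func() {
+	for {
+		select {
+		case event := <-watcher.Events:
+			log.Println("event:", event)
+		case err := <-watcher.Errors:
+			log.Println("error:", err)
+		}
+	}
+}()
+
+if err := watcher.Add("/tmp"); err != nil {
+	log.Fatal(err)
+}
+select {} // block forever; a real program would coordinate shutdown
+```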
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. We are looking into making this single-thread friendly (see [howeyc #7][#7]).
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 000000000..ced39cb88
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 000000000..190bf0de5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
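+
+// Illustrative sketch (not part of the original source): consumers typically
+// test the Op bitmask of a received event, e.g.
+//
+//	if event.Op&Write == Write {
+//		log.Println("modified file:", event.Name)
+//	}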
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 000000000..bfa9dbc3c
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,334 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ unix.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+ w.cv = sync.NewCond(&w.mu)
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ watchEntry, found := w.watches[name]
+ w.mu.Unlock()
+ if found {
+ watchEntry.flags |= flags
+ flags |= unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ w.mu.Lock()
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ w.mu.Unlock()
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// The watches and paths maps are cleaned up in ignoreLinux() implicitly and
+	// asynchronously by the call to inotify_rm_watch() below: the readEvents()
+	// goroutine receives IN_IGNORED and deletes the entries. EINVAL therefore
+	// means the wd is being rm_watch()ed, or its file was removed, by another
+	// thread and we have not yet received the IN_IGNORED event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+
+	// wait until ignoreLinux() has removed the watches/paths entries
+ exists := true
+ for exists {
+ w.cv.Wait()
+ _, exists = w.watches[name]
+ }
+
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer unix.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = unix.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == unix.EINTR {
+ continue
+ }
+
+ // unix.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+			if n == 0 {
+				// EOF was received; this should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// An error occurred while reading.
+				err = errno
+			} else {
+				// The read was too short.
+				err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
+ // If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill
+			// the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name := w.paths[int(raw.Wd)]
+ w.mu.Unlock()
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(w, raw.Wd, mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ name := w.paths[int(wd)]
+ delete(w.paths, int(wd))
+ delete(w.watches, name)
+ w.cv.Broadcast()
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 000000000..cc7db4b22
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = unix.EpollCreate1(0)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := unix.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = unix.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]unix.EpollEvent, 7)
+ for {
+ n, errno := unix.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == unix.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let unix.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+					return false, errors.New("error on the pipe descriptor")
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// wake wakes up the poller by writing a single byte to the pipe.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := unix.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := unix.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ unix.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ unix.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ unix.Close(poller.epfd)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 000000000..c2b4acb18
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,503 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan bool // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan bool),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+ w.mu.Unlock()
+
+ // copy paths to remove while locked
+ w.mu.Lock()
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // unlock before calling Remove, which also locks
+
+ var err error
+ for _, name := range pathsToRemove {
+ if e := w.Remove(name); e != nil && err == nil {
+ err = e
+ }
+ }
+
+ // Send "quit" message to the reader goroutine:
+ w.done <- true
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// It returns the real path to the file which was added, if any; this may differ from the path passed in when symlinks are involved.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return "", nil
+ }
+
+ // Don't watch named pipes.
+ if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ watchfd, err = unix.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]unix.Kevent_t, 10)
+
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ err := unix.Close(w.kq)
+ if err != nil {
+ w.Errors <- err
+ }
+ close(w.Events)
+ close(w.Errors)
+ return
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ w.Errors <- err
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+				// Double check to make sure the directory exists. This can happen when
+				// we do a rm -fr on a recursively watched folder and we receive a
+				// modification event first, but the folder has been deleted and we later
+				// receive the delete event.
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel
+ w.Events <- event
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing; ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles watches every file in a directory, to mimic inotify
+// behavior when adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This lets the BSD version of
+// fsnotify match Linux inotify, which provides a create event for files
+// created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ w.Errors <- err
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ w.Events <- newCreateEvent(filePath)
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
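+//
+// For example (an illustrative sketch, not part of the original source),
+// polling with a 100ms timeout:
+//
+//	ts := durationToTimespec(100 * time.Millisecond)
+//	evs, err := read(kq, make([]unix.Kevent_t, 10), &ts)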
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 000000000..7d8de1451
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 000000000..9139e1716
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 000000000..09436f31d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+const (
+ // Options for AddWatch
+ sysFSONESHOT = 0x80000000
+ sysFSONLYDIR = 0x1000000
+
+ // Events
+ sysFSACCESS = 0x1
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCLOSE = 0x18
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+
+ // Special events
+ sysFSIGNORED = 0x8000
+ sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sysFSONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+			// Sanity check: the next offset must lie within the buffer.
+			if offset >= n {
+				w.Errors <- errors.New("Windows system assumed the buffer to be larger than it is; events have likely been missed")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 000000000..7805d36de
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 000000000..fff09b619
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,120 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary data in your YAML files though - just store it without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error, since JSON does not support this. The same error occurs in `Unmarshal`, since map keys can't be unmarshaled anyway: struct fields can't serve as keys.
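+
+For instance, a mapping used as a map key has no JSON equivalent, so converting a document like this (an illustrative snippet) will error out:
+
+```
+? {kind: map}
+: some value
+```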
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ age: 30
+ name: John
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+The `yaml.YAMLToJSON` and `yaml.JSONToYAML` functions are also available:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 000000000..586007402
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ if v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ } else {
+ v = reflect.New(v.Type().Elem())
+ }
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
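+//
+// For example (illustrative), bytes.EqualFold([]byte("K"), []byte("\u212A"))
+// is true because the Kelvin sign folds to 'k'; that is why keys containing
+// k/K or s/S must take the slower equalFoldRight path.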
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
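+
+// As an illustrative sketch of how parseTag and Contains combine:
+//
+//	name, opts := parseTag("name,omitempty,string")
+//	// name == "name", opts.Contains("omitempty") == true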
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 000000000..4fb4054a8
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshal marshals the object into JSON, then converts the JSON to YAML and
+// returns the YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// Unmarshal converts YAML to JSON, then uses the JSON to unmarshal into the
+// given object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// JSONToYAML converts JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, passing
+// JSON through this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
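+//
+// A minimal illustrative example:
+//
+//	j, err := YAMLToJSON([]byte("name: John\nage: 30"))
+//	// j is []byte(`{"age":30,"name":"John"}`), err is nil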
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+				// go-yaml will only return an int64 as a key if the system
+				// architecture is 32-bit and the key's value does not fit in a
+				// 32-bit int. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+				// Stolen from go-yaml so that floats are converted to strings
+				// the same way go-yaml itself does when marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md
new file mode 100644
index 000000000..d675c49d0
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/README.md
@@ -0,0 +1,6 @@
+# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis)
+
+
+A foundational library to analyze an OAI specification document for easier reasoning about the content. \ No newline at end of file
diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go
new file mode 100644
index 000000000..d388db3a7
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/analyzer.go
@@ -0,0 +1,614 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+ "fmt"
+ slashpath "path"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+type referenceAnalysis struct {
+ schemas map[string]spec.Ref
+ responses map[string]spec.Ref
+ parameters map[string]spec.Ref
+ items map[string]spec.Ref
+ allRefs map[string]spec.Ref
+ referenced struct {
+ schemas map[string]SchemaRef
+ responses map[string]*spec.Response
+ parameters map[string]*spec.Parameter
+ }
+}
+
+func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
+ r.allRefs["#"+key] = ref
+}
+
+func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) {
+ r.items["#"+key] = items.Ref
+ r.addRef(key, items.Ref)
+}
+
+func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
+ r.schemas["#"+key] = ref.Schema.Ref
+ r.addRef(key, ref.Schema.Ref)
+}
+
+func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
+ r.responses["#"+key] = resp.Ref
+ r.addRef(key, resp.Ref)
+}
+
+func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
+ r.parameters["#"+key] = param.Ref
+ r.addRef(key, param.Ref)
+}
+
+// New takes a swagger spec object and returns an analyzed spec document.
+// The analyzed document contains a number of indices that make it easier to
+// reason about the semantics of a swagger specification, for use in code
+// generation, validation, and similar tasks.
+func New(doc *spec.Swagger) *Spec {
+ a := &Spec{
+ spec: doc,
+ consumes: make(map[string]struct{}, 150),
+ produces: make(map[string]struct{}, 150),
+ authSchemes: make(map[string]struct{}, 150),
+ operations: make(map[string]map[string]*spec.Operation, 150),
+ allSchemas: make(map[string]SchemaRef, 150),
+ allOfs: make(map[string]SchemaRef, 150),
+ references: referenceAnalysis{
+ schemas: make(map[string]spec.Ref, 150),
+ responses: make(map[string]spec.Ref, 150),
+ parameters: make(map[string]spec.Ref, 150),
+ items: make(map[string]spec.Ref, 150),
+ allRefs: make(map[string]spec.Ref, 150),
+ },
+ }
+ a.references.referenced.schemas = make(map[string]SchemaRef, 150)
+ a.references.referenced.responses = make(map[string]*spec.Response, 150)
+ a.references.referenced.parameters = make(map[string]*spec.Parameter, 150)
+ a.initialize()
+ return a
+}
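+
+// A minimal usage sketch, assuming a *spec.Swagger value named doc has been
+// loaded and unmarshaled elsewhere:
+//
+//	analyzed := analysis.New(doc)
+//	for _, id := range analyzed.OperationIDs() {
+//		fmt.Println(id)
+//	}
+//	refs := analyzed.AllRefs()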
+
+// Spec wraps a swagger spec object and turns it into a registry
+// with a set of utility methods to act on the information in the spec.
+type Spec struct {
+ spec *spec.Swagger
+ consumes map[string]struct{}
+ produces map[string]struct{}
+ authSchemes map[string]struct{}
+ operations map[string]map[string]*spec.Operation
+ references referenceAnalysis
+ allSchemas map[string]SchemaRef
+ allOfs map[string]SchemaRef
+}
+
+func (s *Spec) initialize() {
+ for _, c := range s.spec.Consumes {
+ s.consumes[c] = struct{}{}
+ }
+ for _, c := range s.spec.Produces {
+ s.produces[c] = struct{}{}
+ }
+ for _, ss := range s.spec.Security {
+ for k := range ss {
+ s.authSchemes[k] = struct{}{}
+ }
+ }
+ for path, pathItem := range s.AllPaths() {
+ s.analyzeOperations(path, &pathItem)
+ }
+
+ for name, parameter := range s.spec.Parameters {
+ refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
+ if parameter.Items != nil {
+ s.analyzeItems("items", parameter.Items, refPref)
+ }
+ if parameter.In == "body" && parameter.Schema != nil {
+ s.analyzeSchema("schema", *parameter.Schema, refPref)
+ }
+ }
+
+ for name, response := range s.spec.Responses {
+ refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
+ for _, v := range response.Headers {
+ if v.Items != nil {
+ s.analyzeItems("items", v.Items, refPref)
+ }
+ }
+ if response.Schema != nil {
+ s.analyzeSchema("schema", *response.Schema, refPref)
+ }
+ }
+
+ for name, schema := range s.spec.Definitions {
+ s.analyzeSchema(name, schema, "/definitions")
+ }
+ // TODO: after analyzing all things and flattening schemas etc
+ // resolve all the collected references to their final representations
+ // best put in a separate method because this could get expensive
+}
+
+func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
+ // TODO: resolve refs here?
+ op := pi
+ s.analyzeOperation("GET", path, op.Get)
+ s.analyzeOperation("PUT", path, op.Put)
+ s.analyzeOperation("POST", path, op.Post)
+ s.analyzeOperation("PATCH", path, op.Patch)
+ s.analyzeOperation("DELETE", path, op.Delete)
+ s.analyzeOperation("HEAD", path, op.Head)
+ s.analyzeOperation("OPTIONS", path, op.Options)
+ for i, param := range op.Parameters {
+ refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
+ if param.Ref.String() != "" {
+ s.references.addParamRef(refPref, &param)
+ }
+ if param.Items != nil {
+ s.analyzeItems("items", param.Items, refPref)
+ }
+ if param.Schema != nil {
+ s.analyzeSchema("schema", *param.Schema, refPref)
+ }
+ }
+}
+
+func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) {
+ if items == nil {
+ return
+ }
+ refPref := slashpath.Join(prefix, name)
+ s.analyzeItems(name, items.Items, refPref)
+ if items.Ref.String() != "" {
+ s.references.addItemsRef(refPref, items)
+ }
+}
+
+func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
+ if op == nil {
+ return
+ }
+
+ for _, c := range op.Consumes {
+ s.consumes[c] = struct{}{}
+ }
+ for _, c := range op.Produces {
+ s.produces[c] = struct{}{}
+ }
+ for _, ss := range op.Security {
+ for k := range ss {
+ s.authSchemes[k] = struct{}{}
+ }
+ }
+ if _, ok := s.operations[method]; !ok {
+ s.operations[method] = make(map[string]*spec.Operation)
+ }
+ s.operations[method][path] = op
+ prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
+ for i, param := range op.Parameters {
+ refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
+ if param.Ref.String() != "" {
+ s.references.addParamRef(refPref, &param)
+ }
+ s.analyzeItems("items", param.Items, refPref)
+ if param.In == "body" && param.Schema != nil {
+ s.analyzeSchema("schema", *param.Schema, refPref)
+ }
+ }
+ if op.Responses != nil {
+ if op.Responses.Default != nil {
+ refPref := slashpath.Join(prefix, "responses", "default")
+ if op.Responses.Default.Ref.String() != "" {
+ s.references.addResponseRef(refPref, op.Responses.Default)
+ }
+ for _, v := range op.Responses.Default.Headers {
+ s.analyzeItems("items", v.Items, refPref)
+ }
+ if op.Responses.Default.Schema != nil {
+ s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
+ }
+ }
+ for k, res := range op.Responses.StatusCodeResponses {
+ refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
+ if res.Ref.String() != "" {
+ s.references.addResponseRef(refPref, &res)
+ }
+ for _, v := range res.Headers {
+ s.analyzeItems("items", v.Items, refPref)
+ }
+ if res.Schema != nil {
+ s.analyzeSchema("schema", *res.Schema, refPref)
+ }
+ }
+ }
+}
+
+func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
+ refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
+ schRef := SchemaRef{
+ Name: name,
+ Schema: &schema,
+ Ref: spec.MustCreateRef("#" + refURI),
+ }
+ s.allSchemas["#"+refURI] = schRef
+ if schema.Ref.String() != "" {
+ s.references.addSchemaRef(refURI, schRef)
+ }
+ for k, v := range schema.Definitions {
+ s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
+ }
+ for k, v := range schema.Properties {
+ s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
+ }
+ for k, v := range schema.PatternProperties {
+ s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
+ }
+ for i, v := range schema.AllOf {
+ s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
+ }
+ if len(schema.AllOf) > 0 {
+ s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)}
+ }
+ for i, v := range schema.AnyOf {
+ s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
+ }
+ for i, v := range schema.OneOf {
+ s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
+ }
+ if schema.Not != nil {
+ s.analyzeSchema("not", *schema.Not, refURI)
+ }
+ if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
+ s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
+ }
+ if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
+ s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
+ }
+ if schema.Items != nil {
+ if schema.Items.Schema != nil {
+ s.analyzeSchema("items", *schema.Items.Schema, refURI)
+ }
+ for i, sch := range schema.Items.Schemas {
+ s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
+ }
+ }
+}
+
+// SecurityRequirement is a representation of a security requirement for an operation
+type SecurityRequirement struct {
+ Name string
+ Scopes []string
+}
+
+// SecurityRequirementsFor gets the security requirements for the operation
+func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
+ if s.spec.Security == nil && operation.Security == nil {
+ return nil
+ }
+
+ schemes := s.spec.Security
+ if operation.Security != nil {
+ schemes = operation.Security
+ }
+
+ unique := make(map[string]SecurityRequirement)
+ for _, scheme := range schemes {
+ for k, v := range scheme {
+ if _, ok := unique[k]; !ok {
+ unique[k] = SecurityRequirement{Name: k, Scopes: v}
+ }
+ }
+ }
+
+ var result []SecurityRequirement
+ for _, v := range unique {
+ result = append(result, v)
+ }
+ return result
+}
+
+// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
+func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
+ requirements := s.SecurityRequirementsFor(operation)
+ if len(requirements) == 0 {
+ return nil
+ }
+ result := make(map[string]spec.SecurityScheme)
+ for _, v := range requirements {
+ if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
+ if definition != nil {
+ result[v.Name] = *definition
+ }
+ }
+ }
+ return result
+}
+
+// ConsumesFor gets the media types consumed by the operation
+func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
+	if len(operation.Consumes) == 0 {
+ cons := make(map[string]struct{}, len(s.spec.Consumes))
+ for _, k := range s.spec.Consumes {
+ cons[k] = struct{}{}
+ }
+ return s.structMapKeys(cons)
+ }
+
+ cons := make(map[string]struct{}, len(operation.Consumes))
+ for _, c := range operation.Consumes {
+ cons[c] = struct{}{}
+ }
+ return s.structMapKeys(cons)
+}
+
+// ProducesFor gets the media types produced by the operation
+func (s *Spec) ProducesFor(operation *spec.Operation) []string {
+ if len(operation.Produces) == 0 {
+ prod := make(map[string]struct{}, len(s.spec.Produces))
+ for _, k := range s.spec.Produces {
+ prod[k] = struct{}{}
+ }
+ return s.structMapKeys(prod)
+ }
+
+ prod := make(map[string]struct{}, len(operation.Produces))
+ for _, c := range operation.Produces {
+ prod[c] = struct{}{}
+ }
+ return s.structMapKeys(prod)
+}
+
+func mapKeyFromParam(param *spec.Parameter) string {
+ return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
+}
+
+func fieldNameFromParam(param *spec.Parameter) string {
+ if nm, ok := param.Extensions.GetString("go-name"); ok {
+ return nm
+ }
+ return swag.ToGoName(param.Name)
+}
+
+func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
+ for _, param := range parameters {
+ pr := param
+ if pr.Ref.String() != "" {
+ obj, _, err := pr.Ref.GetPointer().Get(s.spec)
+ if err != nil {
+ panic(err)
+ }
+ pr = obj.(spec.Parameter)
+ }
+ res[mapKeyFromParam(&pr)] = pr
+ }
+}
+
+// ParametersFor the specified operation id
+func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
+ gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
+ bag := make(map[string]spec.Parameter)
+ s.paramsAsMap(pi.Parameters, bag)
+ s.paramsAsMap(op.Parameters, bag)
+
+ var res []spec.Parameter
+ for _, v := range bag {
+ res = append(res, v)
+ }
+ return res
+ }
+ for _, pi := range s.spec.Paths.Paths {
+ if pi.Get != nil && pi.Get.ID == operationID {
+ return gatherParams(&pi, pi.Get)
+ }
+ if pi.Head != nil && pi.Head.ID == operationID {
+ return gatherParams(&pi, pi.Head)
+ }
+ if pi.Options != nil && pi.Options.ID == operationID {
+ return gatherParams(&pi, pi.Options)
+ }
+ if pi.Post != nil && pi.Post.ID == operationID {
+ return gatherParams(&pi, pi.Post)
+ }
+ if pi.Patch != nil && pi.Patch.ID == operationID {
+ return gatherParams(&pi, pi.Patch)
+ }
+ if pi.Put != nil && pi.Put.ID == operationID {
+ return gatherParams(&pi, pi.Put)
+ }
+ if pi.Delete != nil && pi.Delete.ID == operationID {
+ return gatherParams(&pi, pi.Delete)
+ }
+ }
+ return nil
+}
+
+// ParamsFor the specified method and path. Aggregates the parameters with the
+// path-level defaults, so the result is all the parameters that apply for the
+// method and path.
+func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
+ res := make(map[string]spec.Parameter)
+ if pi, ok := s.spec.Paths.Paths[path]; ok {
+ s.paramsAsMap(pi.Parameters, res)
+ s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res)
+ }
+ return res
+}
+
+// OperationForName gets the operation for the given id
+func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
+ for method, pathItem := range s.operations {
+ for path, op := range pathItem {
+ if operationID == op.ID {
+ return method, path, op, true
+ }
+ }
+ }
+ return "", "", nil, false
+}
+
+// OperationFor the given method and path
+func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
+ if mp, ok := s.operations[strings.ToUpper(method)]; ok {
+		op, found := mp[path]
+		return op, found
+ }
+ return nil, false
+}
+
+// Operations gathers all the operations specified in the spec document
+func (s *Spec) Operations() map[string]map[string]*spec.Operation {
+ return s.operations
+}
+
+func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
+ if len(mp) == 0 {
+ return nil
+ }
+
+ result := make([]string, 0, len(mp))
+ for k := range mp {
+ result = append(result, k)
+ }
+ return result
+}
+
+// AllPaths returns all the paths in the swagger spec
+func (s *Spec) AllPaths() map[string]spec.PathItem {
+ if s.spec == nil || s.spec.Paths == nil {
+ return nil
+ }
+ return s.spec.Paths.Paths
+}
+
+// OperationIDs gets all the operation ids based on method and path
+func (s *Spec) OperationIDs() []string {
+ if len(s.operations) == 0 {
+ return nil
+ }
+ result := make([]string, 0, len(s.operations))
+ for method, v := range s.operations {
+ for p, o := range v {
+ if o.ID != "" {
+ result = append(result, o.ID)
+ } else {
+ result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+ }
+ }
+ }
+ return result
+}
+
+// RequiredConsumes gets all the distinct consumes that are specified in the specification document
+func (s *Spec) RequiredConsumes() []string {
+ return s.structMapKeys(s.consumes)
+}
+
+// RequiredProduces gets all the distinct produces that are specified in the specification document
+func (s *Spec) RequiredProduces() []string {
+ return s.structMapKeys(s.produces)
+}
+
+// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
+func (s *Spec) RequiredSecuritySchemes() []string {
+ return s.structMapKeys(s.authSchemes)
+}
+
+// SchemaRef is a reference to a schema
+type SchemaRef struct {
+ Name string
+ Ref spec.Ref
+ Schema *spec.Schema
+}
+
+// SchemasWithAllOf returns schema references to all schemas that are defined
+// with an allOf key
+func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
+ for _, v := range s.allOfs {
+ result = append(result, v)
+ }
+ return
+}
+
+// AllDefinitions returns schema references for all the definitions that were discovered
+func (s *Spec) AllDefinitions() (result []SchemaRef) {
+ for _, v := range s.allSchemas {
+ result = append(result, v)
+ }
+ return
+}
+
+// AllDefinitionReferences returns json refs for all the discovered schemas
+func (s *Spec) AllDefinitionReferences() (result []string) {
+ for _, v := range s.references.schemas {
+ result = append(result, v.String())
+ }
+ return
+}
+
+// AllParameterReferences returns json refs for all the discovered parameters
+func (s *Spec) AllParameterReferences() (result []string) {
+ for _, v := range s.references.parameters {
+ result = append(result, v.String())
+ }
+ return
+}
+
+// AllResponseReferences returns json refs for all the discovered responses
+func (s *Spec) AllResponseReferences() (result []string) {
+ for _, v := range s.references.responses {
+ result = append(result, v.String())
+ }
+ return
+}
+
+// AllItemsReferences returns the references for all the items
+func (s *Spec) AllItemsReferences() (result []string) {
+ for _, v := range s.references.items {
+ result = append(result, v.String())
+ }
+ return
+}
+
+// AllReferences returns all the references found in the document
+func (s *Spec) AllReferences() (result []string) {
+ for _, v := range s.references.allRefs {
+ result = append(result, v.String())
+ }
+ return
+}
+
+// AllRefs returns all the unique references found in the document
+func (s *Spec) AllRefs() (result []spec.Ref) {
+ set := make(map[string]struct{})
+ for _, v := range s.references.allRefs {
+ a := v.String()
+ if a == "" {
+ continue
+ }
+ if _, ok := set[a]; !ok {
+ set[a] = struct{}{}
+ result = append(result, v)
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
new file mode 100644
index 000000000..813788aff
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -0,0 +1,15 @@
+# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer)
+An implementation of JSON Pointer for the Go language.
+
+## Status
+Completed YES
+
+Tested YES
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+### Note
+The part of Section 4 (Evaluation) of the reference above that starts with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
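+
+## Usage
+
+A minimal sketch of parsing a pointer and evaluating it against a document
+(the pointer string and the document value are illustrative; imports of
+`fmt` and this package are assumed):
+
+```go
+var document interface{} // e.g. a decoded JSON document
+p, err := jsonpointer.New("/definitions/Pet")
+if err != nil {
+	// handle the parse error
+}
+value, kind, err := p.Get(document)
+fmt.Println(value, kind, err)
+```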
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
new file mode 100644
index 000000000..fe2d6ee57
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -0,0 +1,390 @@
+// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author sigu-399
+// author-github https://github.com/sigu-399
+// author-mail sigu.399@gmail.com
+//
+// repository-name jsonpointer
+// repository-desc An implementation of JSON Pointer - Go language
+//
+// description Main and unique file.
+//
+// created 25-02-2013
+
+package jsonpointer
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+const (
+ emptyPointer = ``
+ pointerSeparator = `/`
+
+	invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + `"`
+)
+
+var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
+var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
+
+// JSONPointable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONPointable interface {
+ JSONLookup(string) (interface{}, error)
+}
+
+// JSONSetable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONSetable interface {
+ JSONSet(string, interface{}) error
+}
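+
+// A hypothetical sketch of a type customizing lookup by implementing
+// JSONPointable (the Metadata type and its field are illustrative, not part
+// of this package):
+//
+//	type Metadata struct {
+//		values map[string]interface{}
+//	}
+//
+//	func (m Metadata) JSONLookup(token string) (interface{}, error) {
+//		if v, ok := m.values[token]; ok {
+//			return v, nil
+//		}
+//		return nil, fmt.Errorf("object has no key %q", token)
+//	}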
+
+// New creates a new json pointer for the given string
+func New(jsonPointerString string) (Pointer, error) {
+
+ var p Pointer
+ err := p.parse(jsonPointerString)
+ return p, err
+
+}
+
+// Pointer represents a parsed JSON pointer
+type Pointer struct {
+ referenceTokens []string
+}
+
+// "Constructor", parses the given string JSON pointer
+func (p *Pointer) parse(jsonPointerString string) error {
+
+ var err error
+
+ if jsonPointerString != emptyPointer {
+ if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
+ err = errors.New(invalidStart)
+ } else {
+ referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
+ for _, referenceToken := range referenceTokens[1:] {
+ p.referenceTokens = append(p.referenceTokens, referenceToken)
+ }
+ }
+ }
+
+ return err
+}
+
+// Get uses the pointer to retrieve a value from a JSON document
+func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+ return p.get(document, swag.DefaultJSONNameProvider)
+}
+
+// Set uses the pointer to set a value in a JSON document
+func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+ return document, p.set(document, value, swag.DefaultJSONNameProvider)
+}
+
+// GetForToken gets a value for a json pointer token 1 level deep
+func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+ return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+// SetForToken sets a value for a json pointer token 1 level deep
+func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+ return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+ rValue := reflect.Indirect(reflect.ValueOf(node))
+ kind := rValue.Kind()
+
+ switch kind {
+
+ case reflect.Struct:
+ if rValue.Type().Implements(jsonPointableType) {
+ r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ if err != nil {
+ return nil, kind, err
+ }
+ return r, kind, nil
+ }
+ nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+ if !ok {
+ return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
+ }
+ fld := rValue.FieldByName(nm)
+ return fld.Interface(), kind, nil
+
+ case reflect.Map:
+ kv := reflect.ValueOf(decodedToken)
+ mv := rValue.MapIndex(kv)
+
+ if mv.IsValid() && !swag.IsZero(mv) {
+ return mv.Interface(), kind, nil
+ }
+ return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
+
+ case reflect.Slice:
+ tokenIndex, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return nil, kind, err
+ }
+ sLength := rValue.Len()
+ if tokenIndex < 0 || tokenIndex >= sLength {
+ return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
+ }
+
+ elem := rValue.Index(tokenIndex)
+ return elem.Interface(), kind, nil
+
+ default:
+ return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
+ }
+
+}
+
+func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+ rValue := reflect.Indirect(reflect.ValueOf(node))
+ switch rValue.Kind() {
+
+ case reflect.Struct:
+ if ns, ok := node.(JSONSetable); ok { // pointer impl
+ return ns.JSONSet(decodedToken, data)
+ }
+
+ if rValue.Type().Implements(jsonSetableType) {
+ return node.(JSONSetable).JSONSet(decodedToken, data)
+ }
+
+ nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+ if !ok {
+ return fmt.Errorf("object has no field %q", decodedToken)
+ }
+ fld := rValue.FieldByName(nm)
+ if fld.IsValid() {
+ fld.Set(reflect.ValueOf(data))
+ }
+ return nil
+
+ case reflect.Map:
+ kv := reflect.ValueOf(decodedToken)
+ rValue.SetMapIndex(kv, reflect.ValueOf(data))
+ return nil
+
+ case reflect.Slice:
+ tokenIndex, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return err
+ }
+ sLength := rValue.Len()
+ if tokenIndex < 0 || tokenIndex >= sLength {
+			return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
+ }
+
+ elem := rValue.Index(tokenIndex)
+ if !elem.CanSet() {
+ return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
+ }
+ elem.Set(reflect.ValueOf(data))
+ return nil
+
+ default:
+ return fmt.Errorf("invalid token reference %q", decodedToken)
+ }
+
+}
+
+func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+
+ if nameProvider == nil {
+ nameProvider = swag.DefaultJSONNameProvider
+ }
+
+ kind := reflect.Invalid
+
+ // Full document when empty
+ if len(p.referenceTokens) == 0 {
+ return node, kind, nil
+ }
+
+ for _, token := range p.referenceTokens {
+
+ decodedToken := Unescape(token)
+
+ r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
+ if err != nil {
+ return nil, knd, err
+ }
+ node, kind = r, knd
+
+ }
+
+ rValue := reflect.ValueOf(node)
+ kind = rValue.Kind()
+
+ return node, kind, nil
+}
+
+func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+ knd := reflect.ValueOf(node).Kind()
+
+ if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
+		return fmt.Errorf("only structs, pointers, maps, slices and arrays are supported for setting values")
+ }
+
+ if nameProvider == nil {
+ nameProvider = swag.DefaultJSONNameProvider
+ }
+
+ // Full document when empty
+ if len(p.referenceTokens) == 0 {
+ return nil
+ }
+
+ lastI := len(p.referenceTokens) - 1
+ for i, token := range p.referenceTokens {
+ isLastToken := i == lastI
+ decodedToken := Unescape(token)
+
+ if isLastToken {
+
+ return setSingleImpl(node, data, decodedToken, nameProvider)
+ }
+
+ rValue := reflect.Indirect(reflect.ValueOf(node))
+ kind := rValue.Kind()
+
+ switch kind {
+
+ case reflect.Struct:
+ if rValue.Type().Implements(jsonPointableType) {
+ r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ if err != nil {
+ return err
+ }
+ fld := reflect.ValueOf(r)
+ if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
+ node = fld.Addr().Interface()
+ continue
+ }
+ node = r
+ continue
+ }
+ nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+ if !ok {
+ return fmt.Errorf("object has no field %q", decodedToken)
+ }
+ fld := rValue.FieldByName(nm)
+ if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
+ node = fld.Addr().Interface()
+ continue
+ }
+ node = fld.Interface()
+
+ case reflect.Map:
+ kv := reflect.ValueOf(decodedToken)
+ mv := rValue.MapIndex(kv)
+
+ if !mv.IsValid() {
+ return fmt.Errorf("object has no key %q", decodedToken)
+ }
+ if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
+ node = mv.Addr().Interface()
+ continue
+ }
+ node = mv.Interface()
+
+ case reflect.Slice:
+ tokenIndex, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return err
+ }
+ sLength := rValue.Len()
+ if tokenIndex < 0 || tokenIndex >= sLength {
+				return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
+ }
+
+ elem := rValue.Index(tokenIndex)
+ if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr {
+ node = elem.Addr().Interface()
+ continue
+ }
+ node = elem.Interface()
+
+ default:
+ return fmt.Errorf("invalid token reference %q", decodedToken)
+ }
+
+ }
+
+ return nil
+}
+
+// DecodedTokens returns the decoded tokens
+func (p *Pointer) DecodedTokens() []string {
+ result := make([]string, 0, len(p.referenceTokens))
+ for _, t := range p.referenceTokens {
+ result = append(result, Unescape(t))
+ }
+ return result
+}
+
+// IsEmpty returns true if this is an empty json pointer;
+// this indicates that it points to the root document
+func (p *Pointer) IsEmpty() bool {
+ return len(p.referenceTokens) == 0
+}
+
+// String returns the string representation of the pointer
+func (p *Pointer) String() string {
+
+ if len(p.referenceTokens) == 0 {
+ return emptyPointer
+ }
+
+ pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator)
+
+ return pointerString
+}
+
+// Specific JSON pointer encoding here
+// ~0 => ~
+// ~1 => /
+// ... and vice versa
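+//
+// For example, Escape("a/b~c") returns "a~1b~0c",
+// and Unescape("a~1b~0c") restores "a/b~c".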
+
+const (
+ encRefTok0 = `~0`
+ encRefTok1 = `~1`
+ decRefTok0 = `~`
+ decRefTok1 = `/`
+)
+
+// Unescape unescapes a json pointer reference token string to the original representation
+func Unescape(token string) string {
+ step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
+ step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ return step2
+}
+
+// Escape escapes a pointer reference token string
+func Escape(token string) string {
+ step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
+ step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ return step2
+}
diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md
new file mode 100644
index 000000000..66345f4c6
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/README.md
@@ -0,0 +1,15 @@
+# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference)
+An implementation of JSON Reference for the Go language.
+
+## Status
+Work in progress (90% done)
+
+## Dependencies
+https://github.com/go-openapi/jsonpointer
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
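+
+## Usage
+
+A minimal sketch of resolving a child reference against a parent (the URLs
+are illustrative; imports of `fmt` and this package are assumed):
+
+```go
+parent, err := jsonreference.New("http://example.com/specs/root.json")
+if err != nil {
+	// handle the parse error
+}
+child := jsonreference.MustCreateRef("definitions.json#/Pet")
+resolved, err := parent.Inherits(child)
+if err == nil {
+	// resolved points at http://example.com/specs/definitions.json#/Pet
+	fmt.Println(resolved.String())
+}
+```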
diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go
new file mode 100644
index 000000000..3bc0a6e26
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/reference.go
@@ -0,0 +1,156 @@
+// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author sigu-399
+// author-github https://github.com/sigu-399
+// author-mail sigu.399@gmail.com
+//
+// repository-name jsonreference
+// repository-desc An implementation of JSON Reference - Go language
+//
+// description Main and unique file.
+//
+// created 26-02-2013
+
+package jsonreference
+
+import (
+ "errors"
+ "net/url"
+ "strings"
+
+ "github.com/PuerkitoBio/purell"
+ "github.com/go-openapi/jsonpointer"
+)
+
+const (
+ fragmentRune = `#`
+)
+
+// New creates a new reference for the given string
+func New(jsonReferenceString string) (Ref, error) {
+
+ var r Ref
+ err := r.parse(jsonReferenceString)
+ return r, err
+
+}
+
+// MustCreateRef parses the ref string and panics when it's invalid.
+// Use the New function for a version that returns an error instead
+func MustCreateRef(ref string) Ref {
+ r, err := New(ref)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// Ref represents a json reference object
+type Ref struct {
+ referenceURL *url.URL
+ referencePointer jsonpointer.Pointer
+
+ HasFullURL bool
+ HasURLPathOnly bool
+ HasFragmentOnly bool
+ HasFileScheme bool
+ HasFullFilePath bool
+}
+
+// GetURL gets the URL for this reference
+func (r *Ref) GetURL() *url.URL {
+ return r.referenceURL
+}
+
+// GetPointer gets the json pointer for this reference
+func (r *Ref) GetPointer() *jsonpointer.Pointer {
+ return &r.referencePointer
+}
+
+// String returns the best version of the url for this reference
+func (r *Ref) String() string {
+
+ if r.referenceURL != nil {
+ return r.referenceURL.String()
+ }
+
+ if r.HasFragmentOnly {
+ return fragmentRune + r.referencePointer.String()
+ }
+
+ return r.referencePointer.String()
+}
+
+// IsRoot returns true if this reference is a root document
+func (r *Ref) IsRoot() bool {
+ return r.referenceURL != nil &&
+ !r.IsCanonical() &&
+ !r.HasURLPathOnly &&
+ r.referenceURL.Fragment == ""
+}
+
+// IsCanonical returns true when this pointer starts with http(s):// or file://
+func (r *Ref) IsCanonical() bool {
+ return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
+}
+
+// "Constructor", parses the given string JSON reference
+func (r *Ref) parse(jsonReferenceString string) error {
+
+ parsed, err := url.Parse(jsonReferenceString)
+ if err != nil {
+ return err
+ }
+
+ r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
+ refURL := r.referenceURL
+
+ if refURL.Scheme != "" && refURL.Host != "" {
+ r.HasFullURL = true
+ } else {
+ if refURL.Path != "" {
+ r.HasURLPathOnly = true
+ } else if refURL.RawQuery == "" && refURL.Fragment != "" {
+ r.HasFragmentOnly = true
+ }
+ }
+
+ r.HasFileScheme = refURL.Scheme == "file"
+ r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/")
+
+ // an invalid json-pointer error just means the url has no json-pointer fragment; ignore it
+ r.referencePointer, _ = jsonpointer.New(refURL.Fragment)
+
+ return nil
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+ childURL := child.GetURL()
+ parentURL := r.GetURL()
+ if childURL == nil {
+ return nil, errors.New("child url is nil")
+ }
+ if parentURL == nil {
+ return &child, nil
+ }
+
+ ref, err := New(parentURL.ResolveReference(childURL).String())
+ if err != nil {
+ return nil, err
+ }
+ return &ref, nil
+}
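For orientation, here is a minimal usage sketch of the API vendored above (not part of the diff). It relies only on the exported New, MustCreateRef, and Inherits from reference.go; the URL and pointer values are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// A parent document URL and a fragment-only child reference (hypothetical values).
	parent := jsonreference.MustCreateRef("http://example.com/openapi.json")
	child, err := jsonreference.New("#/definitions/Pet")
	if err != nil {
		panic(err)
	}

	// The child has no scheme, host, or path, so only the fragment flag is set.
	fmt.Println(child.HasFragmentOnly) // true
	fmt.Println(child.String())        // #/definitions/Pet

	// Inherits resolves the child against the parent's URL.
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String()) // http://example.com/openapi.json#/definitions/Pet
}
```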
diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md
new file mode 100644
index 000000000..9d5c89997
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/README.md
@@ -0,0 +1,5 @@
+# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads)
+
+Loading of OAI specification documents from local or remote locations.
diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go
new file mode 100644
index 000000000..ff1ee1c9f
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/spec.go
@@ -0,0 +1,203 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loads
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/go-openapi/analysis"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+// JSONDoc loads a json document from either a file or a remote url
+func JSONDoc(path string) (json.RawMessage, error) {
+ data, err := swag.LoadFromFileOrHTTP(path)
+ if err != nil {
+ return nil, err
+ }
+ return json.RawMessage(data), nil
+}
+
+// DocLoader represents a doc loader type
+type DocLoader func(string) (json.RawMessage, error)
+
+// DocMatcher represents a predicate to check if a loader matches
+type DocMatcher func(string) bool
+
+var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc}
+
+// AddLoader for a document
+func AddLoader(predicate DocMatcher, load DocLoader) {
+ prev := loaders
+ loaders = &loader{
+ Match: predicate,
+ Fn: load,
+ Next: prev,
+ }
+
+}
+
+type loader struct {
+ Fn DocLoader
+ Match DocMatcher
+ Next *loader
+}
+
+// JSONSpec loads a spec from a json document
+func JSONSpec(path string) (*Document, error) {
+ data, err := JSONDoc(path)
+ if err != nil {
+ return nil, err
+ }
+ // data is already raw JSON; hand it straight to the analyzer
+ return Analyzed(data, "")
+}
+
+// Document represents a swagger spec document
+type Document struct {
+ // specAnalyzer
+ Analyzer *analysis.Spec
+ spec *spec.Swagger
+ origSpec *spec.Swagger
+ schema *spec.Schema
+ raw json.RawMessage
+}
+
+// Spec loads a new spec document
+func Spec(path string) (*Document, error) {
+ specURL, err := url.Parse(path)
+ if err != nil {
+ return nil, err
+ }
+ for l := loaders.Next; l != nil; l = l.Next {
+ if l.Match(specURL.Path) {
+ b, err2 := l.Fn(path)
+ if err2 != nil {
+ return nil, err2
+ }
+ return Analyzed(b, "")
+ }
+ }
+ b, err := loaders.Fn(path)
+ if err != nil {
+ return nil, err
+ }
+ return Analyzed(b, "")
+}
+
+var swag20Schema = spec.MustLoadSwagger20Schema()
+
+// Analyzed creates a new analyzed spec document
+func Analyzed(data json.RawMessage, version string) (*Document, error) {
+ if version == "" {
+ version = "2.0"
+ }
+ if version != "2.0" {
+ return nil, fmt.Errorf("spec version %q is not supported", version)
+ }
+
+ swspec := new(spec.Swagger)
+ if err := json.Unmarshal(data, swspec); err != nil {
+ return nil, err
+ }
+
+ origSpec := new(spec.Swagger)
+ if err := json.Unmarshal(data, origSpec); err != nil {
+ return nil, err
+ }
+
+ d := &Document{
+ Analyzer: analysis.New(swspec),
+ schema: swag20Schema,
+ spec: swspec,
+ raw: data,
+ origSpec: origSpec,
+ }
+ return d, nil
+}
+
+// Expanded expands the ref fields in the spec document and returns a new spec document
+func (d *Document) Expanded() (*Document, error) {
+ swspec := new(spec.Swagger)
+ if err := json.Unmarshal(d.raw, swspec); err != nil {
+ return nil, err
+ }
+ if err := spec.ExpandSpec(swspec); err != nil {
+ return nil, err
+ }
+
+ dd := &Document{
+ Analyzer: analysis.New(swspec),
+ spec: swspec,
+ schema: swag20Schema,
+ raw: d.raw,
+ origSpec: d.origSpec,
+ }
+ return dd, nil
+}
+
+// BasePath returns the base path for this spec
+func (d *Document) BasePath() string {
+ return d.spec.BasePath
+}
+
+// Version returns the version of this spec
+func (d *Document) Version() string {
+ return d.spec.Swagger
+}
+
+// Schema returns the swagger 2.0 schema
+func (d *Document) Schema() *spec.Schema {
+ return d.schema
+}
+
+// Spec returns the swagger spec object model
+func (d *Document) Spec() *spec.Swagger {
+ return d.spec
+}
+
+// Host returns the host for the API
+func (d *Document) Host() string {
+ return d.spec.Host
+}
+
+// Raw returns the raw swagger spec as json bytes
+func (d *Document) Raw() json.RawMessage {
+ return d.raw
+}
+
+// OrigSpec returns the original spec document as it was first unmarshaled, before any modifications
+func (d *Document) OrigSpec() *spec.Swagger {
+ return d.origSpec
+}
+
+// ResetDefinitions replaces the spec's definitions with a shallow copy of the original models and returns the same document
+func (d *Document) ResetDefinitions() *Document {
+ defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
+ for k, v := range d.origSpec.Definitions {
+ defs[k] = v
+ }
+
+ d.spec.Definitions = defs
+ return d
+}
+
+// Pristine creates a new pristine document instance based on the input data
+func (d *Document) Pristine() *Document {
+ dd, _ := Analyzed(d.Raw(), d.Version())
+ return dd
+}
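A short, hedged sketch of how the loader chain and Document API above fit together (not part of the diff). The spec path is hypothetical; the matcher simply routes .json paths to the package's own JSONDoc fallback, purely to show AddLoader's mechanics.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-openapi/loads"
)

func main() {
	// Register a loader: the DocMatcher decides if it applies, the DocLoader fetches the bytes.
	// JSONDoc is already the default fallback; this registration only illustrates the chain.
	loads.AddLoader(func(path string) bool {
		return strings.HasSuffix(path, ".json")
	}, loads.JSONDoc)

	// Spec parses the path, walks the loader chain, and analyzes the document.
	doc, err := loads.Spec("./swagger.json") // hypothetical local file
	if err != nil {
		panic(err)
	}
	fmt.Println(doc.Version(), doc.Host(), doc.BasePath())

	// Expanded resolves all $ref fields into a new Document.
	expanded, err := doc.Expanded()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(expanded.Spec().Definitions), "definitions after expansion")
}
```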
diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md
new file mode 100644
index 000000000..4b2af124a
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/README.md
@@ -0,0 +1,5 @@
+# OAI object model [![Build Status](https://ci.vmware.run/api/badges/go-openapi/spec/status.svg)](https://ci.vmware.run/go-openapi/spec) [![Coverage](https://coverage.vmware.run/badges/go-openapi/spec/coverage.svg)](https://coverage.vmware.run/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec)
+
+The object model for OpenAPI specification documents \ No newline at end of file
diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go
new file mode 100644
index 000000000..294cbccf7
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/bindata.go
@@ -0,0 +1,274 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by go-bindata.
+// sources:
+// schemas/jsonschema-draft-04.json
+// schemas/v2/schema.json
+// DO NOT EDIT!
+
+package spec
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+ gz, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, fmt.Errorf("Read %q: %v", name, err)
+ }
+
+ var buf bytes.Buffer
+ _, err = io.Copy(&buf, gz)
+ clErr := gz.Close()
+
+ if err != nil {
+ return nil, fmt.Errorf("Read %q: %v", name, err)
+ }
+ if clErr != nil {
+ return nil, clErr
+ }
+
+ return buf.Bytes(), nil
+}
+
+type asset struct {
+ bytes []byte
+ info os.FileInfo
+}
+
+type bindataFileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+ return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+ return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+ return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+ return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+ return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+ return nil
+}
+
+var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\x3b\x6f\xdb\x30\x10\xde\xfd\x2b\x04\xa5\x63\x52\xb9\x40\xa7\x6c\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x85\x0d\x43\xff\xbd\xa4\xa8\x07\x29\x91\x92\x2d\xbb\x48\xb4\xc4\xe1\xbd\xbe\x3b\xde\x8b\xe7\x45\x20\xbf\x10\xc7\xe1\x73\x10\x1e\x84\xc8\x9e\xa3\xe8\x35\x67\xf4\x29\xdf\x1d\x20\x45\x9f\x19\xdf\x47\x31\x47\x89\x78\x5a\x7e\x8d\xf4\xd9\x43\xf8\xa8\x85\x3e\xe9\xff\x67\x48\xc6\x90\xef\x38\xce\x04\x66\x54\x49\x7f\x67\x1c\x02\xcd\x12\xa4\x20\x50\xad\xa2\xe3\x4e\x30\xc5\x8a\x39\x97\xdc\x1a\x71\x45\xd0\x6c\xdf\x38\x47\x27\x8b\x50\x11\xc5\x29\x03\xa5\x1c\x55\xe4\x47\x9b\x98\x62\xba\x12\x90\x2a\x7d\x5f\x7a\x24\x5c\x9f\x9f\xa5\x83\x1c\x12\xa5\xe2\x21\x0c\xca\x96\xa9\xec\xf8\xc3\x8c\xe5\x12\xd7\x5f\x58\x51\x01\x7b\xe0\x7e\x10\xb8\x66\x18\xc2\xc0\x69\x91\x4a\x8e\xe5\x25\xfa\x7f\x40\x82\x0a\x22\x96\x43\x3b\x88\x90\xdf\x0a\xea\xda\x82\x1d\x19\x91\x8b\xfa\x58\xa5\x21\xc5\x1c\x6b\x9d\x0a\x42\x50\x06\x1b\x27\x8c\x1c\xa7\x19\x81\x3f\xd2\x97\x7c\x68\x1a\x68\xe5\xc0\xba\x8d\x74\x10\x6e\x19\x23\x80\xa8\xfa\xd9\x3a\x1e\x84\xb4\x20\x44\xff\x4d\xb7\xfa\x84\x6d\x5f\x61\x27\xd4\xaf\x5c\x70\x4c\xf7\xa1\xcf\x7e\x45\x9d\x73\xcf\xc6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x28\x7e\x2b\xa0\xa1\x0a\x5e\x40\x07\x73\x61\x80\x6d\x6d\x34\x8e\xe9\xd3\x8c\xb3\x0c\xb8\xc0\xbd\xe8\xe9\xa2\xf3\x78\x53\xa3\xec\x01\x49\x18\x4f\x91\xba\xab\xb0\xe0\x38\x74\xc6\xaa\x2b\xca\x7b\x6b\x16\x58\x10\x98\xd4\xeb\x14\xb5\xeb\x7d\x96\x82\x26\x4b\xcf\xe6\x71\x2a\xcf\xb0\x4c\xcd\x2a\xf7\x3d\x6a\x9b\x74\xf3\x56\x5e\x8f\x02\xc7\x1d\x29\x72\x59\x28\xbf\x5a\x16\xfb\xc6\x4d\xfb\xe8\x58\xb3\x8c\x1b\x77\x0a\x77\x86\xa6\xb4\xb4\xf5\x64\x93\xbb\xa0\x24\x88\xe4\x1e\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x74\xfc\x09\x74\x2f\x0e\xbd\x9e\x3b\xd5\xbc\x2c\x1f\xaf\xd6\xd0\xb6\x52\xbb\xdf\x22\x21\x80\x4f\xe7\xa8\xb7\x78\xb8\xd4\x7d\x74\x07\x13\xc5\x71\x05\x05\x91\xa6\x91\xf4\x7b\x38\x3d\xe9\x1e\x6e\x1d\xab\xef\x3c\x0c\x74\xbf\x7d\xd5\x6c\xce\x89\xa5\xbe\x8d\xf7\x66\xce\xee\xd1\x86\x67\x80\x34\xad\x8f\xc3\xb3\xae\xc6\x1c\xe3\xb7\xc2\x96\xd9\xb4\x72\x0c\xf0\xab\x92\xe9\x5a\x05\xee\x5c\xb2\x87\xc6\x7f\xa9\x9b\x17\x6b\xb0\xcc\x75\x77\x96\x16\xb7\xcf\x1c\xde\x0a\xcc\x21\x1e\x53\x64\x0e\x73\x4f\x81\xbc\xb8\x07\xa6\xe6\xfa\x50\x55\xe2\x5b\x4d\xad\x4b\xb6\xb6\x81\x49\x77\xc7\xca\x68\x1a\x90\x67\xd7\x78\x3f\x3c\xba\xa3\x8e\xdd\xe8\x7b\xc0\x8a\x21\x03\x1a\x03\xdd\xdd\x11\xd1\x20\xd3\x46\x72\x55\x7d\x93\x0d\xb3\xcf\x34\x52\x46\x03\xd9\x8d\x75\xe2\x0e\x42\xbd\xb9\xdf\xe9\xdd\x34\xb6\x24\x9b\x5b\xa4\x56\x3f\x6b\xac\xd8\x01\x30\x1e\x25\xce\x3a\x77\xc6\x73\xd4\xbd\x96\xc9\xf5\x06\xbc\xca\xf8\x44\xb0\x2e\x09\x5a\xf3\xf5\x3a\x94\x7b\xb7\xa8\x9f\x7f\x17\x8e\x58\x53\xb2\x0e\xfc\xf5\x92\x8c\xc2\x4c\x49\xca\x84\xe7\x7d\x5d\xb6\x2f\x7e\x4f\x79\xba\x96\xe6\x75\xb7\x87\x9b\x0d\xdc\xb5\xbd\xae\xbb\x85\xb8\x8e\x64\x67\xd1\xe8\x18\xe5\xe2\x5f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00")
+
+func jsonschemaDraft04JSONBytes() ([]byte, error) {
+ return bindataRead(
+ _jsonschemaDraft04JSON,
+ "jsonschema-draft-04.json",
+ )
+}
+
+func jsonschemaDraft04JSON() (*asset, error) {
+ bytes, err := jsonschemaDraft04JSONBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1441640690, 0)}
+ a := &asset{bytes: bytes, info: info}
+ return a, nil
+}
+
+var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\xdf\x73\xdc\xb6\xf1\x7f\xcf\x5f\x81\xb9\x78\x46\xf6\x24\xd6\x39\xfe\x7e\x5f\xea\x97\x8c\x1a\x39\x89\x5a\xbb\xd2\xf8\x9c\xf6\xc1\x95\x67\x70\x24\x4e\x87\x84\x3f\x2e\x04\x29\xe9\xea\xea\x7f\xef\x02\xfc\x71\x04\x01\x90\x20\x89\x3b\x9d\x6d\x7a\xa6\x8d\x8e\x04\x16\x8b\xc5\x62\xf7\xb3\x0b\x10\xf8\xf4\x0d\x42\xb3\x94\xa6\x01\x99\xbd\x42\xb3\x33\xf4\xb7\xc5\xe5\x3f\xd0\xc2\x5b\x93\x10\xa3\x55\x9c\xa0\xc5\x1d\xbe\xb9\x21\x09\x7a\x79\xfa\x02\x9d\x5d\x5d\x9c\xce\xbe\xe7\x15\xa8\xcf\x4b\xaf\xd3\x74\xf3\x6a\x3e\x67\x79\x91\x53\x1a\xcf\x6f\x5f\xce\x99\xa8\x7b\xfa\x3b\x8b\xa3\x6f\xf3\xc2\x4f\xf2\x47\xb5\x1a\xfc\xe5\xf3\xa2\x60\x9c\xdc\xcc\xfd\x04\xaf\xd2\xe7\x2f\xfe\xbf\xa8\x5c\xd4\x4b\xb7\x1b\xc1\x54\xbc\xfc\x9d\x78\x69\xfe\x2c\x21\x7f\x66\x34\x21\xbc\xf9\x0f\xf0\x1b\x9e\x14\xad\x8b\xd7\x9c\xb3\x68\x15\x97\x7f\x6f\x70\xba\x66\x33\xf8\xfb\x5a\xd4\xc5\xbe\x4f\x53\x1a\x47\x38\xb8\x4a\xe2\x0d\x49\x52\x4a\x18\xd0\x59\xe1\x80\x11\x51\x00\xca\xa7\x24\x89\xa4\xb7\x9f\x72\x52\x1f\xef\x9f\x57\x3f\x78\x97\x12\xb2\xe2\xac\x7d\x3b\xf7\xc9\x8a\x46\x82\x2c\x9b\xdf\x92\xc8\x8f\x93\xd7\xf7\x29\x89\x18\x3c\x98\x89\xd2\x0f\xf0\xff\x0f\x39\x79\x0d\xdd\x92\xfb\x1a\xed\xb2\xdb\x2c\x4d\x68\x74\x53\xf4\x05\x9e\x93\x28\x0b\xab\x6e\x8b\x27\x30\x26\xb3\xe2\xd7\x75\x55\xcc\x27\xcc\x4b\xe8\x86\x73\xc4\xa9\xbc\x5f\x93\x6a\x0c\x6f\x49\xc2\xf9\x42\xf1\x0a\xa5\x6b\xca\x90\x1f\x7b\x59\x48\xa2\xf4\xb4\xe0\xb4\x2e\xc2\xce\xce\x8a\x52\x52\xbd\x75\xcc\x52\x9b\x8e\x14\x62\xe6\xaf\x3e\x7e\xf8\xf8\xe9\x61\x8e\x5e\xfd\x1b\xfe\x5d\x7f\xf7\xf4\xc7\x57\xf0\x97\xff\xdd\xb3\x1f\x9f\xcc\xda\xfa\xc3\x1b\x42\x4f\x23\x1c\x12\x04\x1a\x4a\x37\xcf\xf2\x1e\x11\xa1\xa0\xe8\xf5\x3d\x0e\x37\x01\x79\x85\x4e\x76\x8a\x79\x22\x73\xba\xc4\x8c\x5c\x81\x72\xf4\xe5\x76\xde\xca\x16\xa7\x8a\xb8\xce\xa1\x34\xd6\xb1\x33\xc7\x1b\x7a\xd2\x90\xb5\x50\xf8\x9a\x42\x18\xc5\x5d\x14\x7c\x43\x41\xc6\x12\x05\x0f\xde\x66\x0d\x12\x0d\xe6\xce\x50\x00\xd5\xb8\x90\xde\x5e\xbc\x7d\x8d\x78\x4f\x19\xc2\x9e\x47\x36\x29\xf1\xd1\x72\x5b\x31\xbb\xeb\x9e\x9e\x89\x90\xf8\x14\xbf\x87\xea\x2a\x1b\xa0\xdc\x7e\xe6\xf5\x67\xa3\x68\x1a\x79\x38\x42\x05\x8d\x51\x6c\x88\x29\xdf\x29\xcd\xca\x32\xec\x6a\xd6\x5e\x77\xd7\xaf\x17\x6e\xb4\x9f\x80\x5a\x82\xc2\x58\x31\x51\x94\x3d\x37\x51\x4b\x08\xdb\xc0\x43\x1b\xfd\x28\x8b\x1a\x69\x31\xe2\x65\x09\x4d\xb7\x16\xaa\x56\x96\xd4\xd6\x3f\xef\x23\x27\x5d\x25\x89\x6a\x8a\x6f\x98\x6e\x16\xe2\x24\xc1\xdb\x9d\x1e\xd0\x94\x84\xf5\x72\xc6\x06\x81\x5e\x69\x12\x1f\xaa\xda\x59\x44\xff\xcc\xc8\x45\x41\x23\x4d\x32\x22\xf1\x40\xee\xf9\x04\xc7\xc1\x79\xec\x59\x74\x49\x2a\xdd\xb0\xf0\x3a\x1d\x52\xcc\xa9\xc6\xad\xe9\x66\xcb\x2f\x24\x22\x09\x0e\x10\xaf\x9e\x84\x98\x3f\x46\x78\x19\x67\xa9\x66\xb6\x2a\x5e\x51\x3c\x2d\xcc\x7d\x55\xac\x72\xf4\x8a\xcf\xe8\xf2\x8c\xe5\xd4\x32\x78\x47\xf1\x5a\xf6\x90\x2d\x02\xd4\x7a\xc9\x52\x8e\xf2\xc0\x69\x3c\x66\xad\x1b\x8d\xd6\x0c\x06\x5c\x27\xdb\x33\x94\xab\x04\xc2\x91\x0f\x56\x87\x78\x14\x2c\xb7\x20\x5a\xf7\x24\x35\xce\xbe\x57\xa5\x3a\xa6\x75\x06\x20\x27\x4a\xa9\x57\x79\x64\x70\xed\x4b\x70\xd0\x9d\x8d\xcb\x94\x86\x33\x10\xc4\x11\x07\x04\xb5\xe7\x92\x0b\x5d\xac\xe3\x2c\x00\xcf\x40\x90\x4f\x57\x2b\x92\x00\x46\x40\xab\x24\x0e\x45\x09\x21\xa7\x53\x84\x7e\xa1\xe9\xaf\xd9\x12\xfd\x1c\xe0\xdb\x18\x74\x0f\xbd\xc5\xc9\x1f\x7e\x7c\x17\x21\x40\x16\x38\x08\xe2\x3b\xe2\x1b\x7a\x01\x6a\x14\xb2\xcb\xd5\x82\x24\xb7\xd4\x1b\x33\x8e\xdc\xeb\x0a\x62\x9c\x7b\x96\x93\x13\xa8\xb5\x5d\x8a\xe0\x32\x53\xec\xa5\x76\xea\x5a\x16\xd6\x52\x0a\xa0\x41\x30\xba\x76\x94\xca\xc2\xaa\xc2\x37\x1d\x7a\x83\x3b\x5b\x
93\xf1\x53\x5e\x53\x32\x19\xa5\x34\x60\x60\x40\xd7\x24\x0d\xeb\x39\xfd\x0d\x73\x91\xc3\xb0\x91\x43\x48\x7d\x50\x30\xba\xda\x42\x59\x94\xa3\xba\x9c\xcb\x42\x12\x08\xda\x85\x80\x61\x0e\x91\x02\x8e\xe8\x7f\x44\xbf\x0c\x23\x9b\x25\xc1\x48\x5e\x7e\x7b\xf7\x06\x6d\x62\x0a\xfc\x00\x33\x05\x8e\xf3\x54\xb9\x9e\xca\x84\xf2\xe7\x9c\x06\xb8\x3b\x3d\x6b\x30\xe5\xe9\x58\xe6\x04\x0d\x04\xc3\x05\xde\x9e\x59\x49\xc9\xc0\x65\xce\x4c\x9b\xe5\x3d\x98\xb1\x97\x74\x5f\x9d\x4f\x46\xdd\xd7\xfb\x3c\xa1\x8d\x03\xfd\xdb\xfe\x14\xbc\xae\xd4\x45\x17\x05\xfc\x3d\x45\x17\xe9\x09\x43\x24\xf2\xe2\x2c\xc1\x37\x60\x44\x41\xe3\x32\xc6\xfd\x12\xba\x5c\x00\x28\x8e\x43\x18\x08\xba\x0c\xaa\x6a\x07\xd5\xfb\xaa\x4d\x2b\x5d\x3f\x16\x1d\x52\x42\x00\x4b\xeb\xf9\x8e\x04\x20\xeb\xdb\x3c\x84\x63\xa5\x0c\x68\xe4\xd3\x5b\xea\x67\x80\xc4\x80\x0d\x21\x21\x76\x8a\x40\x62\x5b\x14\x66\x10\xcd\x80\x8f\x4c\xca\x8a\x45\x95\x93\x32\xbc\x3c\x39\x55\xc2\xc8\x3d\x0a\xa3\xa6\x0e\x10\xa8\x5a\x11\xe3\x3d\xe5\xb0\xb8\x6d\x14\xdb\xe6\x8e\x4d\x00\x65\x92\xbe\x81\x6e\x27\xc2\x2f\x92\x49\x0a\x9f\x8d\xd1\xbc\x8c\x44\x72\x20\x04\x68\x92\xe7\xb4\xf2\xf6\x59\x81\x79\x96\x42\xcd\x61\xb0\x72\x72\x0c\xc6\x91\x3f\x29\x82\x69\xbf\x00\x86\x22\x1c\x95\x23\xe4\x86\xaa\x69\x22\xb8\x3d\xf6\xbd\x6a\xaf\x7f\xf7\x13\x02\x38\x97\x81\x9f\x15\x8e\x81\x09\x5c\x50\x0b\x56\xa5\x6e\xe9\x62\xc9\x3d\xf6\xaa\x6c\x6e\xbf\x9d\x32\x45\x79\x3d\x7b\x23\xfb\x8c\x06\x83\x6a\xac\x56\xb6\x5a\xe5\xda\xc4\xcb\x2e\x2f\xc6\xcd\xb9\xe2\xc4\x4c\xfe\xc9\x3e\x26\x70\xe1\x3a\x8e\xdd\xfa\x93\x3c\xdd\x36\x66\x88\x95\x04\x41\x48\x43\xf2\x3e\xa7\xd1\x99\x2e\xd4\xb8\xd6\x2a\xdb\x55\x42\x80\x5f\xdf\xbf\xbf\x42\x21\x40\x38\x70\xf9\x0d\x8b\xc2\xd9\xc0\x8d\xa1\xec\x09\x81\x76\x49\xa3\x81\x38\xe8\x88\xe2\x7c\x39\x3b\x24\x09\x43\xce\x10\x89\x57\x6a\x96\x48\x37\x54\xb5\x97\x0f\x52\x75\x43\x9a\xa8\x51\x70\x06\x0e\x22\xc4\xc9\x76\x54\xfc\xbd\x4c\x28\x81\x88\x35\xa7\x54\xaa\x45\x35\xf6\x8f\x16\xfc\x57\x1c\x7c\x3f\x22\xba\x37\x18\x5a\xf1\xce\x36\xa5\xd6\xa4\x59\x31\x76\xe1\xbb\x48\xfb\x14\x01\x27\xdd\xa5\x5c\xba\x64\xaf\x49\x6f\x1b\x84\xdb\x33\xc5\xdd\x22\x16\x4d\x9a\xbb\xc9\x96\x26\xf9\x3f\x88\xad\x82\x8e\x2b\xb6\xb4\x59\xf0\x16\x92\xbb\xf2\x66\x9a\xba\x5c\x78\x0b\x49\xc5\x0a\x36\x26\xb1\xb2\xee\xd2\x42\x4b\x59\x7b\x69\x52\xf3\x39\x0e\xf1\x70\x4a\x8c\xda\xb9\x8c\xe3\x80\xe0\xa8\xa9\x9e\x2b\x9c\x05\xa9\x84\xa6\x15\x46\xd5\xb4\x7d\x1b\xa7\x52\xea\x5e\xd0\x32\xc6\x48\x02\xf8\xbb\x02\x42\x47\xe4\x34\x0a\xc2\xbd\x81\xd0\x0d\xb1\xcc\x08\xee\x7c\xb4\x5e\xf9\x33\x47\x74\xe4\xe5\xd4\xe1\x84\x7c\x12\xc0\xdc\x72\x42\x2a\xde\x34\xa3\x81\xe1\xb4\xd6\x04\x2b\xd3\x65\x98\xa0\x70\xea\xad\x1d\x51\x72\x64\xb7\xb4\x93\x4e\xbb\x9a\x67\x9d\x9c\xc8\xeb\x56\x61\x2c\x4f\x29\x31\x61\xbb\x09\x05\x4b\x9e\xf0\x44\x04\x8e\xb6\xe8\x16\x07\xd4\xcf\x11\x26\x83\x60\x23\x83\x32\xb1\x2f\xc2\xa6\x93\xc2\xdc\xd4\xb3\x12\x21\x95\xa7\xec\x0f\x6e\x67\xfd\xd3\x0f\x2f\x9e\xff\xe5\xfa\xd3\xff\x3d\x3c\x7b\xf2\xdf\x8f\x4f\x8b\xf6\x9f\x3d\xe9\x67\xc1\xff\x89\x83\x8c\x18\xf2\x1c\x7b\x30\x2b\x51\x9c\x36\x40\xa8\x7e\x84\x2c\x65\xd4\x29\x25\x6d\x37\xfa\x77\x64\xd7\x95\x2e\xf5\xcb\xe5\x59\x53\xc1\x38\x22\x97\x2b\x29\x86\xe8\x31\x3a\xda\x81\xb1\xa8\xcf\xb7\x00\xbd\x23\x62\x6d\xc9\xd3\x2c\x89\x5c\x6b\x59\x1f\x1e\x14\xd5\xa7\x53\xd9\xc4\xbe\x23\xeb\x6a\xdb\x93\x54\x53\x95\x76\x53\x62\x2d\x52\x93\x93\x5f\x4a\x93\x3d\x28\xad\x68\x40\x16\x3a\x6a\xb5\x5f\xd7\x46\xbb\x6d\x6d\x21\xcb\xc2\x86\x48\x41\x89\xd5\x5b\x48\x55\xa5\x5b\x26\xef\x91\x61\x15\x49\x89\x55\xb9\x39\xcf\xa4\xe5\x4d\xcc\x5a\x9a\x77\x06\xf8\xf4\xd3\x4c\x90\xb4\x9e\x5f\xa9\x9c\x53\x91\x98\xd2\x85\x73\xca\x0e\x38\
xf1\x54\x53\x92\x9b\x71\xb1\xa2\xde\x7c\x4a\xa3\x94\xdc\xa8\x8f\x75\xe8\x1c\x95\x29\x86\xce\x09\x51\xa5\xc4\x7a\x5b\x08\x5d\xc2\xc2\x04\x35\x12\x1a\x52\xbe\xca\xc0\xf2\x04\x85\x96\x9e\x17\x07\x01\x0c\x25\x54\xf8\x59\xcb\x93\x69\x85\xbb\x51\xcb\x80\x22\xcb\x60\xc5\x82\x64\x59\x58\x4b\x29\xc4\xf7\x34\xcc\x42\x3b\x4a\x65\x61\x83\x01\xf1\x82\x8c\x81\x50\xde\xf6\x21\xa9\xd4\xd2\x73\x09\xe5\xed\xb9\x2c\x0a\x77\x70\xd9\x87\xa4\x52\xcb\x24\xcb\x37\x24\xba\x49\x2d\xf1\xef\xae\xb8\xa9\xcf\xbd\xa8\x55\xc5\x4d\xb8\xbc\xd8\x39\x69\xb7\x14\x25\x0a\x9b\x7a\x79\x61\x3f\x55\xaa\xd2\xa6\x3e\xf6\xa1\x55\x96\xd6\xd2\x92\x33\x86\x16\xe4\xea\x15\xf4\xba\x12\x59\xeb\x47\x64\xd4\x09\x98\x79\x14\x3c\xe5\xa5\x12\x06\x1b\xfa\xb8\x2b\x6f\x98\xf9\xfd\x61\x90\xe2\x99\x1f\xc9\xe9\x36\x8b\xb7\xec\x4e\x85\xe0\xa9\x70\x54\x5b\x1e\x3a\x25\x62\x25\xfc\x0e\x82\x2b\x74\xff\x9c\x67\x3d\x45\x64\xd5\xbd\x6b\x86\xe7\x8d\x35\x65\x8c\xbb\x0f\x97\xb1\xbf\xbd\xaa\xd6\xf5\xc6\x6d\x7c\xa8\xbb\x16\x69\xdf\x9f\x8c\x1b\xaf\x8f\x31\x6d\xe3\x2a\xbb\x9d\xa7\xd6\x35\xc9\xed\x2a\x58\xe7\xcb\xf7\x94\xc7\xc5\x7c\x8f\x9b\xd8\x3d\x43\x21\x8a\x2e\xd0\x25\x2f\x9d\xb1\x71\xfb\xdb\x1c\xef\x18\xd9\x31\x6e\x40\x11\x63\x04\x76\xce\x09\x83\x95\x2b\x12\xc2\x41\xec\x61\xbd\xd0\x6c\xa0\x18\xd7\xe5\x6e\xbc\x54\x53\xe0\x3e\xb9\x52\x13\xdb\x77\x6b\x22\x12\x20\x71\x82\x20\x76\xcf\xbf\x6c\xa8\xd8\xe6\x83\x55\xb6\xc7\x4b\xe4\x09\x2c\x1c\x9c\x0e\xc8\xc4\x6a\xc3\x39\xbb\x38\x6d\xc4\xb6\x8a\x1c\xb7\x57\x16\x62\x91\x2d\x17\x4d\x46\x8e\x2d\xec\xe9\x9c\xeb\x9f\xa9\x06\x1c\xcf\x44\x93\x03\x3d\xfe\x4f\x3f\xd5\x26\xa3\x3a\xd4\xa8\x1e\x3e\x36\x35\x04\xa1\x86\x90\x75\x8a\x4d\xa7\xd8\x74\x8a\x4d\x5b\x7b\x3d\xc5\xa6\x5f\x68\x6c\xfa\x4d\xfd\xbf\x25\x4e\x02\xde\x93\xed\x04\x93\x26\x98\x54\x7b\x2a\x74\x62\x42\x49\xfb\x43\x49\x82\x99\xd7\xe1\x26\xdd\x36\x57\x15\xa5\x96\x6d\x76\xbf\xb4\xb1\x25\x9a\x61\x88\xc1\x94\xe2\x49\x19\x5c\x53\xdb\xe5\xb6\x60\x38\x0a\xb6\x5c\x6f\x45\xc2\x86\xaf\x8a\x73\xa6\x78\xce\x26\x33\x7d\x33\x31\x21\xbc\xa3\x44\x78\xff\x82\x01\x7c\xcb\xad\xfe\x04\xf5\xd0\x04\xf5\x26\xa8\x37\x41\x3d\xd4\x84\x7a\xdc\xe4\x9d\xe3\x14\x4f\x68\x6f\x42\x7b\xb5\xa7\xa5\x5a\x4c\x80\x6f\x02\x7c\x3a\xde\x3f\x0f\xc0\xd7\x78\xc8\xf7\x69\x4d\x20\x10\x4d\x20\x70\x02\x81\x5d\xbd\x9e\x40\xe0\xd7\x04\x02\xf9\x27\x2c\x9f\x27\x00\x34\x7d\xb6\x59\x3c\x2d\x1e\x75\x6f\x9f\x1c\x04\x18\xb5\x4e\x4d\xfa\xd6\xb1\xd6\xb4\xa8\xe1\x1c\x62\x1e\x39\x8c\xe4\x8a\x35\x41\xc8\x69\x65\xb5\xfa\xf7\x75\x40\xae\x09\x69\xa1\x09\x69\x4d\x48\x6b\x42\x5a\xa8\x89\xb4\xa2\x38\xfa\xeb\x21\x36\xa9\xea\x3f\x1e\x19\xf4\x75\x9a\x71\xd3\x9c\x4e\x74\x16\xf4\x5a\x32\x8e\x03\x29\x9a\x96\xab\x07\x92\x33\xa0\x61\x65\x64\xaf\x1b\x18\x5a\x33\xa4\x83\x04\x2e\xef\x62\x1e\xd8\x09\x45\xd1\x3a\xd8\x57\x76\x64\xda\x7e\x4d\x7b\x06\x80\x2b\x47\x8c\x94\xd5\x8f\xe1\x04\x04\x83\xa5\x13\xd0\x73\x3a\x07\x3d\xc6\x4b\x09\x17\x5c\x7f\xe7\x3e\x1c\xb8\x68\x8f\x5d\x2d\xfb\x67\x79\xb4\xfb\x7c\xd7\x9d\xb9\x74\x1a\xad\x35\xbc\x1e\xd1\xa0\xe6\x2b\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x66\x0f\x68\xb1\xeb\x73\x8e\x0e\x20\x36\xa4\x45\x17\x68\x6d\x40\xbb\x4e\x20\xdd\x90\xfe\xba\xc0\x7d\xa3\xfa\x3b\x0a\x1c\xda\xb6\x2c\xf9\x97\x98\x89\x38\xe4\xa2\x88\x99\x86\x01\x49\x07\x2d\x9f\xe7\xf3\xe9\xc5\x20\xf0\x39\x40\xe6\xa3\x10\xea\x3e\x25\xbd\xef\x86\xdb\x05\x6d\x81\x81\x07\x08\xbb\x13\x28\x83\xc0\xcd\xc7\x28\x1c\x42\xea\x07\x69\xbd\x5d\xf4\xa6\xb4\xdf\x18\x06\x72\xaf\x7f\x26\x67\x3e\x6c\x23\x94\x21\x96\xcc\x18\xc6\x74\x7e\xfa\xce\xcb\x44\xdb\xc3\x1e\xa1\xd0\xcc\xa8\xca\x95\x6b\xbf\x9a\x99\xd3\x0a\x4d\x3c\xe8\x01\x95\x26\x15\x36\x06\x4f\xd5\x02\x28\x8b\x94\xd3\xa3
\x89\x51\xb7\xd0\x29\x1b\xb5\x1f\x94\x97\xfa\xb3\xfd\x7a\x32\x28\x0f\xd6\xa8\xa1\xc3\x41\xa0\xa2\xb6\x96\xb3\x09\x4d\x9d\x33\x1d\x5a\x68\xdb\x29\x3d\x77\x86\x98\xc1\x1c\x21\xe5\x7d\xea\x9e\x7b\x7d\x38\xeb\x27\x50\x9f\x72\x6c\x0e\x62\xc2\x69\x9c\x0c\x89\x4e\x12\x88\xf9\x2f\xa3\xc0\x78\x30\xe3\xe0\x23\xd8\xee\x43\xe5\x84\x54\xbd\x0c\x78\x41\x03\x26\x74\x7f\x24\x62\xb1\x02\x50\x17\xe8\x2e\x66\xb4\xfe\xba\xb0\x76\x0c\xcc\x17\x1e\x46\xbb\x38\xb0\x64\x0a\x9d\x8f\x26\x74\x7e\x14\x14\xe4\x66\xe9\xca\x6e\x4b\xcf\xde\xcc\xd9\xf1\x1a\xa3\xe6\x6a\x97\x85\x45\x1a\x75\xb3\xc3\x74\xfe\xd0\xb4\x12\xd9\x45\x69\x5a\x89\x9c\x56\x22\xa7\x95\xc8\xc7\x5b\x89\x7c\x04\xc8\x28\xf9\x24\xdd\xb5\x89\x63\x2f\x29\x2c\x69\xbe\xcb\x31\x0c\xbf\x16\x62\xa6\xf4\xb7\xe3\xd2\x42\x1d\x8d\xe1\xfe\x52\x75\x8a\x4a\x0c\xab\x77\x16\x56\x37\x4c\x98\xce\xd5\x97\x25\x2d\x87\x59\xfb\xf1\xf8\x16\x5b\x9d\xb4\x5d\x10\xa7\xed\x6e\x70\xe7\x65\x75\x7a\x3b\x04\x4a\x40\xef\x87\xd4\x04\x95\x4f\xe8\x32\x53\x0f\x6f\x1e\x0d\x02\xef\x12\xbc\xd9\xb8\x3a\xae\xfc\x58\xe6\x2a\xbf\xfc\xd3\x95\x06\xf5\xb9\x5e\xcc\xb5\xb6\x8d\x3c\x77\xd6\x19\xc0\x3f\x96\x71\xed\xb8\x7a\x76\xb8\xad\xd3\x9d\xc5\x6b\x95\xed\x5a\x62\x46\xbd\xb3\x2c\x5d\xf3\x7b\x24\xf2\xcd\xa6\x0b\xe5\xe8\xfd\x46\x0a\xcc\x8a\x30\xde\xd0\xbf\x93\xad\x1b\x5a\x31\x06\x06\x5f\x5e\x40\x60\x46\x3d\x9a\xba\xa4\x79\x85\x19\xbb\x8b\x13\xdf\x25\xcd\xb3\x0d\xe7\xd3\xa1\x28\x0b\xb2\x9e\x47\x18\xfb\x29\xf6\x89\x96\x6a\xf5\xf7\xb5\x56\xf3\xda\xc6\x79\xbf\x96\xe6\x31\x4e\xd2\x15\xbd\x75\xb9\xf5\xf9\xf8\x4c\x49\x63\x7e\x1d\x60\x0c\x1b\x28\xa2\xb1\xfd\xed\xc0\x23\x9c\x77\xbf\x7b\x88\x87\x7a\xae\x7e\xbb\xf8\x5b\xcf\x36\x6b\xe4\x42\x9c\x1f\xe5\x71\x7c\xba\x69\xb0\xd7\x87\xd5\xd1\x55\x10\xdf\x49\x77\x1c\x00\x4f\x71\x52\xdc\x27\xfb\x5b\x9f\x7b\xe9\xdc\x68\x6c\x2e\x14\x8b\x24\x18\xe7\x7b\x74\x6b\xb4\x10\x7e\x77\x7b\xcc\x83\x5e\xdb\x5e\x84\x22\xfa\xb0\xc8\x6b\x68\xa9\x29\x52\xee\xd1\x13\x8b\xbb\x87\x3f\xff\x59\xa1\x20\x8e\xc7\x9d\x15\x69\xfc\x07\xf9\xf2\x67\xc3\xa6\x10\xfa\xa1\x67\x43\x25\xdd\x69\x16\xc8\xb3\x40\x87\x91\xa7\x89\x50\xb4\xbc\xc7\x89\x80\x77\x72\x9f\xe6\xc2\xb1\xcc\x05\x35\xb0\x3b\x32\xa4\xf4\xf5\x4d\x93\x6a\x48\xbe\x30\xfc\x34\x4d\x42\xa4\x9f\x84\x8b\xe6\x28\x3a\x58\x78\x90\xbb\x2c\xb7\x2a\xdf\x3d\xea\x70\x49\xa6\xba\xe2\x59\x91\x6f\xc7\x3a\x4c\xe3\x0a\xc0\x6e\x96\x34\x5f\x4f\xef\x68\xa0\x88\x10\x9f\xf8\x28\x8d\xc5\xd9\x37\x08\x17\xf7\xf9\xe5\xf7\xb4\x06\x81\xf6\xfa\x89\x92\x37\xd9\x88\x69\xba\x3e\x38\xdd\xa9\xdc\x3b\x2f\x89\xc8\x9a\x8c\xe1\xce\x37\x6d\x1a\xce\x7a\x1d\xac\x76\x37\xeb\x20\xe1\xa7\x09\x8e\x18\xf0\xc4\x2f\xff\x48\x63\x2f\x0e\xca\xef\xd8\xc5\x75\xff\x6d\xe2\x34\xce\x7e\x9d\x79\x14\x1b\x92\x64\x0b\xc1\x9f\x30\xf9\xd1\x9d\xf2\xbb\x66\xf0\x7a\xcb\xa6\x65\x3b\x86\x89\x79\x95\xf5\x99\xc7\x6e\xa5\xab\x44\xe4\x9f\xa9\xfc\x73\x43\x37\xba\x8b\xc7\x77\x4b\x47\x82\x5c\x2b\x97\xbb\xb3\x7f\x0e\xc5\x6e\xed\x41\x58\x3f\x74\xc8\x8e\xff\xe6\xd6\x3e\x47\xdb\xfa\x4a\xf2\x7a\x27\xe1\x74\x2b\xdf\xae\xa9\xe6\x16\x1b\x67\xdb\xf7\x2a\x03\xae\xdb\x1b\xe0\xf2\x6b\xb7\xaa\x21\x65\x47\x8e\xb3\x2f\xdc\xca\x26\x5a\x76\xff\xb8\xff\xaa\xad\xea\x97\xb2\x87\xc7\xd9\x97\x6c\x6a\xbf\x9c\xb6\xa5\xdf\x50\x54\x1b\x2f\x65\xeb\x8f\xfb\x2f\x7a\x6a\x52\xdc\x6b\x6b\xf2\x17\x3c\x3b\xac\xd0\xdc\x90\xe4\xec\xcb\xb4\x9a\x18\x95\xbd\x93\xfb\x94\xe2\x3e\x1b\xd3\x0b\x51\xbf\xe7\xc9\xe9\x57\x67\xd5\x44\x88\xdc\x29\x7f\xd4\x54\x78\x19\x0c\x59\x20\x68\x7d\x54\x2a\x78\x52\xfc\xd5\xa8\x4d\x32\xbd\x3e\x2c\x97\x61\xfa\x37\xfc\x7f\x0f\xff\x0b\x00\x00\xff\xff\x31\x8b\xeb\xb6\x54\x9c\x00\x00")
+
+func v2SchemaJSONBytes() ([]byte, error) {
+ return bindataRead(
+ _v2SchemaJSON,
+ "v2/schema.json",
+ )
+}
+
+func v2SchemaJSON() (*asset, error) {
+ bytes, err := v2SchemaJSONBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ info := bindataFileInfo{name: "v2/schema.json", size: 40020, mode: os.FileMode(420), modTime: time.Unix(1446147817, 0)}
+ a := &asset{bytes: bytes, info: info}
+ return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+ canonicalName := strings.Replace(name, "\\", "/", -1)
+ if f, ok := _bindata[canonicalName]; ok {
+ a, err := f()
+ if err != nil {
+ return nil, fmt.Errorf("Asset %s can't be read: %v", name, err)
+ }
+ return a.bytes, nil
+ }
+ return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+ a, err := Asset(name)
+ if err != nil {
+ panic("asset: Asset(" + name + "): " + err.Error())
+ }
+
+ return a
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+ canonicalName := strings.Replace(name, "\\", "/", -1)
+ if f, ok := _bindata[canonicalName]; ok {
+ a, err := f()
+ if err != nil {
+ return nil, fmt.Errorf("AssetInfo %s can't be read: %v", name, err)
+ }
+ return a.info, nil
+ }
+ return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+ names := make([]string, 0, len(_bindata))
+ for name := range _bindata {
+ names = append(names, name)
+ }
+ return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+ "jsonschema-draft-04.json": jsonschemaDraft04JSON,
+ "v2/schema.json": v2SchemaJSON,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+// data/
+// foo.txt
+// img/
+// a.png
+// b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+ node := _bintree
+ if len(name) != 0 {
+ canonicalName := strings.Replace(name, "\\", "/", -1)
+ pathList := strings.Split(canonicalName, "/")
+ for _, p := range pathList {
+ node = node.Children[p]
+ if node == nil {
+ return nil, fmt.Errorf("Asset %s not found", name)
+ }
+ }
+ }
+ if node.Func != nil {
+ return nil, fmt.Errorf("Asset %s not found", name)
+ }
+ rv := make([]string, 0, len(node.Children))
+ for childName := range node.Children {
+ rv = append(rv, childName)
+ }
+ return rv, nil
+}
+
+type bintree struct {
+ Func func() (*asset, error)
+ Children map[string]*bintree
+}
+
+var _bintree = &bintree{nil, map[string]*bintree{
+ "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}},
+ "v2": &bintree{nil, map[string]*bintree{
+ "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}},
+ }},
+}}
+
+// RestoreAsset restores an asset under the given directory
+func RestoreAsset(dir, name string) error {
+ data, err := Asset(name)
+ if err != nil {
+ return err
+ }
+ info, err := AssetInfo(name)
+ if err != nil {
+ return err
+ }
+ err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+ if err != nil {
+ return err
+ }
+ err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively
+func RestoreAssets(dir, name string) error {
+ children, err := AssetDir(name)
+ // File
+ if err != nil {
+ return RestoreAsset(dir, name)
+ }
+ // Dir
+ for _, child := range children {
+ err = RestoreAssets(dir, filepath.Join(name, child))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func _filePath(dir, name string) string {
+ canonicalName := strings.Replace(name, "\\", "/", -1)
+ return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
+}
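bindata.go is generated by go-bindata, so its public surface is the usual asset accessors. A minimal sketch (not part of the diff) of how a consumer of the package can read the two embedded schemas:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// The generated table holds exactly two assets.
	for _, name := range spec.AssetNames() {
		fmt.Println(name) // jsonschema-draft-04.json, v2/schema.json
	}

	// MustAsset panics on unknown names; safe here because the name is fixed.
	raw := spec.MustAsset("v2/schema.json")
	fmt.Printf("swagger 2.0 schema: %d bytes\n", len(raw))

	// AssetDir exposes the embedded hierarchy: "v2" holds only schema.json.
	children, err := spec.AssetDir("v2")
	if err != nil {
		panic(err)
	}
	fmt.Println(children) // [schema.json]
}
```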
diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go
new file mode 100644
index 000000000..f285970aa
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/contact_info.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ContactInfo contact information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#contactObject
+type ContactInfo struct {
+ Name string `json:"name,omitempty"`
+ URL string `json:"url,omitempty"`
+ Email string `json:"email,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
new file mode 100644
index 000000000..eb1490b05
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -0,0 +1,626 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// ResolutionCache is a cache for resolving URLs
+type ResolutionCache interface {
+ Get(string) (interface{}, bool)
+ Set(string, interface{})
+}
+
+type simpleCache struct {
+ lock sync.Mutex
+ store map[string]interface{}
+}
+
+var resCache = initResolutionCache()
+
+func initResolutionCache() ResolutionCache {
+ return &simpleCache{store: map[string]interface{}{
+ "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(),
+ "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
+ }}
+}
+
+func (s *simpleCache) Get(uri string) (interface{}, bool) {
+ s.lock.Lock()
+ v, ok := s.store[uri]
+ s.lock.Unlock()
+ return v, ok
+}
+
+func (s *simpleCache) Set(uri string, data interface{}) {
+ s.lock.Lock()
+ s.store[uri] = data
+ s.lock.Unlock()
+}
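+
+// A sketch of a caller-supplied cache, assuming single-threaded use
+// (guard the map with a mutex, as simpleCache does, when sharing it);
+// a value like this can be passed wherever a ResolutionCache is accepted:
+//
+//   type memCache map[string]interface{}
+//
+//   func (c memCache) Get(k string) (interface{}, bool) { v, ok := c[k]; return v, ok }
+//   func (c memCache) Set(k string, v interface{})      { c[k] = v }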
+
+// ResolveRef resolves a reference against a context root
+func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
+ resolver, err := defaultSchemaLoader(root, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ result := new(Schema)
+ if err := resolver.Resolve(ref, result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ResolveParameter resolves a parameter reference against a context root
+func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
+ resolver, err := defaultSchemaLoader(root, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ result := new(Parameter)
+ if err := resolver.Resolve(&ref, result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+ resolver, err := defaultSchemaLoader(root, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ result := new(Response)
+ if err := resolver.Resolve(&ref, result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
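+
+// A minimal sketch of the resolvers above, assuming doc is a previously
+// unmarshaled *Swagger value:
+//
+//   ref := MustCreateRef("#/responses/notFound")
+//   resp, err := ResolveResponse(doc, ref)
+//   if err != nil {
+//       // the pointer could not be resolved against doc
+//   }
+//   _ = resp // *Response holding the referenced definition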
+
+type schemaLoader struct {
+ loadingRef *Ref
+ startingRef *Ref
+ currentRef *Ref
+ root interface{}
+ cache ResolutionCache
+ loadDoc func(string) (json.RawMessage, error)
+}
+
+var idPtr, _ = jsonpointer.New("/id")
+var schemaPtr, _ = jsonpointer.New("/$schema")
+var refPtr, _ = jsonpointer.New("/$ref")
+
+func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) {
+ if cache == nil {
+ cache = resCache
+ }
+
+ var ptr *jsonpointer.Pointer
+ if ref != nil {
+ ptr = ref.GetPointer()
+ }
+
+ currentRef := nextRef(root, ref, ptr)
+
+ return &schemaLoader{
+ root: root,
+ loadingRef: ref,
+ startingRef: ref,
+ cache: cache,
+ loadDoc: func(path string) (json.RawMessage, error) {
+ data, err := swag.LoadFromFileOrHTTP(path)
+ if err != nil {
+ return nil, err
+ }
+ return json.RawMessage(data), nil
+ },
+ currentRef: currentRef,
+ }, nil
+}
+
+func idFromNode(node interface{}) (*Ref, error) {
+ if idValue, _, err := idPtr.Get(node); err == nil {
+ if refStr, ok := idValue.(string); ok && refStr != "" {
+ idRef, err := NewRef(refStr)
+ if err != nil {
+ return nil, err
+ }
+ return &idRef, nil
+ }
+ }
+ return nil, nil
+}
+
+func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref {
+ if startingRef == nil {
+ return nil
+ }
+ if ptr == nil {
+ return startingRef
+ }
+
+ ret := startingRef
+ var idRef *Ref
+ node := startingNode
+
+ for _, tok := range ptr.DecodedTokens() {
+ node, _, _ = jsonpointer.GetForToken(node, tok)
+ if node == nil {
+ break
+ }
+
+ idRef, _ = idFromNode(node)
+ if idRef != nil {
+ nw, err := ret.Inherits(*idRef)
+ if err != nil {
+ break
+ }
+ ret = nw
+ }
+
+ refRef, _, _ := refPtr.Get(node)
+ if refRef != nil {
+ rf, _ := NewRef(refRef.(string))
+ nw, err := ret.Inherits(rf)
+ if err != nil {
+ break
+ }
+ ret = nw
+ }
+ }
+ return ret
+}
+
+func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
+ tgt := reflect.ValueOf(target)
+ if tgt.Kind() != reflect.Ptr {
+ return fmt.Errorf("resolve ref: target needs to be a pointer")
+ }
+
+ oldRef := currentRef
+ if currentRef != nil {
+ var err error
+ currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer()))
+ if err != nil {
+ return err
+ }
+ }
+ if currentRef == nil {
+ currentRef = ref
+ }
+
+ refURL := currentRef.GetURL()
+ if refURL == nil {
+ return nil
+ }
+ if currentRef.IsRoot() {
+ nv := reflect.ValueOf(node)
+ reflect.Indirect(tgt).Set(reflect.Indirect(nv))
+ return nil
+ }
+
+ if strings.HasPrefix(refURL.String(), "#") {
+ res, _, err := ref.GetPointer().Get(node)
+ if err != nil {
+ res, _, err = ref.GetPointer().Get(r.root)
+ if err != nil {
+ return err
+ }
+ }
+ rv := reflect.Indirect(reflect.ValueOf(res))
+ tgtType := reflect.Indirect(tgt).Type()
+ if rv.Type().AssignableTo(tgtType) {
+ reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res)))
+ } else {
+ if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ if refURL.Scheme != "" && refURL.Host != "" {
+ // most definitely take the red pill
+ data, _, _, err := r.load(refURL)
+ if err != nil {
+ return err
+ }
+
+ if ((oldRef == nil && currentRef != nil) ||
+ (oldRef != nil && currentRef == nil) ||
+ oldRef.String() != currentRef.String()) &&
+ ((oldRef == nil && ref != nil) ||
+ (oldRef != nil && ref == nil) ||
+ (oldRef.String() != ref.String())) {
+
+ return r.resolveRef(currentRef, ref, data, target)
+ }
+
+ var res interface{}
+ if currentRef.String() != "" {
+ res, _, err = currentRef.GetPointer().Get(data)
+ if err != nil {
+ return err
+ }
+ } else {
+ res = data
+ }
+
+ if err := swag.DynamicJSONToStruct(res, target); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
+ toFetch := *refURL
+ toFetch.Fragment = ""
+
+ data, fromCache := r.cache.Get(toFetch.String())
+ if !fromCache {
+ b, err := r.loadDoc(toFetch.String())
+ if err != nil {
+ return nil, url.URL{}, false, err
+ }
+
+ if err := json.Unmarshal(b, &data); err != nil {
+ return nil, url.URL{}, false, err
+ }
+ r.cache.Set(toFetch.String(), data)
+ }
+
+ return data, toFetch, fromCache, nil
+}
+
+func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error {
+ if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type specExpander struct {
+ spec *Swagger
+ resolver *schemaLoader
+}
+
+// ExpandSpec expands the references in a swagger spec
+func ExpandSpec(spec *Swagger) error {
+ resolver, err := defaultSchemaLoader(spec, nil, nil)
+ if err != nil {
+ return err
+ }
+
+ for key, definition := range spec.Definitions {
+ var def *Schema
+ var err error
+ if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); err != nil {
+ return err
+ }
+ spec.Definitions[key] = *def
+ }
+
+ for key, parameter := range spec.Parameters {
+ if err := expandParameter(&parameter, resolver); err != nil {
+ return err
+ }
+ spec.Parameters[key] = parameter
+ }
+
+ for key, response := range spec.Responses {
+ if err := expandResponse(&response, resolver); err != nil {
+ return err
+ }
+ spec.Responses[key] = response
+ }
+
+ if spec.Paths != nil {
+ for key, path := range spec.Paths.Paths {
+ if err := expandPathItem(&path, resolver); err != nil {
+ return err
+ }
+ spec.Paths.Paths[key] = path
+ }
+ }
+
+ return nil
+}
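+
+// A minimal usage sketch, assuming raw holds the swagger document JSON:
+//
+//   var doc Swagger
+//   if err := json.Unmarshal(raw, &doc); err != nil {
+//       return err
+//   }
+//   if err := ExpandSpec(&doc); err != nil {
+//       return err
+//   }
+//   // doc now has its definitions, parameters, responses and paths expanded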
+
+// ExpandSchema expands the refs in the schema object
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
+
+ if schema == nil {
+ return nil
+ }
+ if root == nil {
+ root = schema
+ }
+
+ nrr, _ := NewRef(schema.ID)
+ var rrr *Ref
+ if nrr.String() != "" {
+ switch root.(type) {
+ case *Schema:
+ rid, _ := NewRef(root.(*Schema).ID)
+ rrr, _ = rid.Inherits(nrr)
+ case *Swagger:
+ rid, _ := NewRef(root.(*Swagger).ID)
+ rrr, _ = rid.Inherits(nrr)
+ }
+
+ }
+
+ resolver, err := defaultSchemaLoader(root, rrr, cache)
+ if err != nil {
+ return err
+ }
+
+ refs := []string{""}
+ if rrr != nil {
+ refs[0] = rrr.String()
+ }
+ var s *Schema
+ if s, err = expandSchema(*schema, refs, resolver); err != nil {
+ return err
+ }
+ *schema = *s
+ return nil
+}
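+
+// A minimal sketch for expanding a single schema in isolation: passing nil
+// for root makes the schema its own root, and passing nil for cache falls
+// back to the package-level resolution cache:
+//
+//   if err := ExpandSchema(schema, nil, nil); err != nil {
+//       return err
+//   }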
+
+func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
+ if target.Items != nil {
+ if target.Items.Schema != nil {
+ t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
+ if err != nil {
+ return nil, err
+ }
+ *target.Items.Schema = *t
+ }
+ for i := range target.Items.Schemas {
+ t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver)
+ if err != nil {
+ return nil, err
+ }
+ target.Items.Schemas[i] = *t
+ }
+ }
+ return &target, nil
+}
+
+func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) {
+ defer func() {
+ schema = &target
+ }()
+ if target.Ref.String() == "" && target.Ref.IsRoot() {
+ target = *resolver.root.(*Schema)
+ return
+ }
+
+ // t is the new expanded schema
+ var t *Schema
+ for target.Ref.String() != "" {
+ pRefs := strings.Join(parentRefs, ",")
+ pRefs += ","
+ if strings.Contains(pRefs, target.Ref.String()+",") {
+ err = nil
+ return
+ }
+
+ if err = resolver.Resolve(&target.Ref, &t); err != nil {
+ return
+ }
+ parentRefs = append(parentRefs, target.Ref.String())
+ target = *t
+ }
+
+ if t, err = expandItems(target, parentRefs, resolver); err != nil {
+ return
+ }
+ target = *t
+
+ for i := range target.AllOf {
+ if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil {
+ return
+ }
+ target.AllOf[i] = *t
+ }
+ for i := range target.AnyOf {
+ if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil {
+ return
+ }
+ target.AnyOf[i] = *t
+ }
+ for i := range target.OneOf {
+ if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil {
+ return
+ }
+ target.OneOf[i] = *t
+ }
+ if target.Not != nil {
+ if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil {
+ return
+ }
+ *target.Not = *t
+ }
+ for k := range target.Properties {
+ if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil {
+ return
+ }
+ target.Properties[k] = *t
+ }
+ if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
+ if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil {
+ return
+ }
+ *target.AdditionalProperties.Schema = *t
+ }
+ for k := range target.PatternProperties {
+ if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil {
+ return
+ }
+ target.PatternProperties[k] = *t
+ }
+ for k := range target.Dependencies {
+ if target.Dependencies[k].Schema != nil {
+ if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil {
+ return
+ }
+ *target.Dependencies[k].Schema = *t
+ }
+ }
+ if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
+ if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil {
+ return
+ }
+ *target.AdditionalItems.Schema = *t
+ }
+ for k := range target.Definitions {
+ if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil {
+ return
+ }
+ target.Definitions[k] = *t
+ }
+ return
+}
+
+func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
+ if pathItem == nil {
+ return nil
+ }
+ if pathItem.Ref.String() != "" {
+ if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil {
+ return err
+ }
+ }
+
+ for idx := range pathItem.Parameters {
+ if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil {
+ return err
+ }
+ }
+ if err := expandOperation(pathItem.Get, resolver); err != nil {
+ return err
+ }
+ if err := expandOperation(pathItem.Head, resolver); err != nil {
+ return err
+ }
+ if err := expandOperation(pathItem.Options, resolver); err != nil {
+ return err
+ }
+ if err := expandOperation(pathItem.Put, resolver); err != nil {
+ return err
+ }
+ if err := expandOperation(pathItem.Post, resolver); err != nil {
+ return err
+ }
+ if err := expandOperation(pathItem.Patch, resolver); err != nil {
+ return err
+ }
+ if err := expandOperation(pathItem.Delete, resolver); err != nil {
+ return err
+ }
+ return nil
+}
+
+func expandOperation(op *Operation, resolver *schemaLoader) error {
+ if op == nil {
+ return nil
+ }
+ for i, param := range op.Parameters {
+ if err := expandParameter(&param, resolver); err != nil {
+ return err
+ }
+ op.Parameters[i] = param
+ }
+
+ if op.Responses != nil {
+ responses := op.Responses
+ if err := expandResponse(responses.Default, resolver); err != nil {
+ return err
+ }
+ for code, response := range responses.StatusCodeResponses {
+ if err := expandResponse(&response, resolver); err != nil {
+ return err
+ }
+ responses.StatusCodeResponses[code] = response
+ }
+ }
+ return nil
+}
+
+func expandResponse(response *Response, resolver *schemaLoader) error {
+ if response == nil {
+ return nil
+ }
+
+ if response.Ref.String() != "" {
+ if err := resolver.Resolve(&response.Ref, response); err != nil {
+ return err
+ }
+ }
+
+ if response.Schema != nil {
+ parentRefs := []string{response.Schema.Ref.String()}
+ if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil {
+ return err
+ }
+ if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil {
+ return err
+ } else {
+ *response.Schema = *s
+ }
+ }
+ return nil
+}
+
+func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
+ if parameter == nil {
+ return nil
+ }
+ if parameter.Ref.String() != "" {
+ if err := resolver.Resolve(&parameter.Ref, parameter); err != nil {
+ return err
+ }
+ }
+ if parameter.Schema != nil {
+ parentRefs := []string{parameter.Schema.Ref.String()}
+ if err := resolver.Resolve(&parameter.Schema.Ref, &parameter.Schema); err != nil {
+ return err
+ }
+ if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil {
+ return err
+ } else {
+ *parameter.Schema = *s
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go
new file mode 100644
index 000000000..88add91b2
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/external_docs.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ExternalDocumentation allows referencing an external resource for
+// extended documentation.
+//
+// For more information: http://goo.gl/8us55a#externalDocumentationObject
+type ExternalDocumentation struct {
+ Description string `json:"description,omitempty"`
+ URL string `json:"url,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go
new file mode 100644
index 000000000..758b84531
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/header.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// HeaderProps describes a response header
+type HeaderProps struct {
+ Description string `json:"description,omitempty"`
+}
+
+// Header describes a header for a response of the API
+//
+// For more information: http://goo.gl/8us55a#headerObject
+type Header struct {
+ CommonValidations
+ SimpleSchema
+ HeaderProps
+}
+
+// ResponseHeader creates a new header instance for use in a response
+func ResponseHeader() *Header {
+ return new(Header)
+}
+
+// WithDescription sets the description on this header, allows for chaining
+func (h *Header) WithDescription(description string) *Header {
+ h.Description = description
+ return h
+}
+
+// Typed a fluent builder method for the type of the header value
+func (h *Header) Typed(tpe, format string) *Header {
+ h.Type = tpe
+ h.Format = format
+ return h
+}
+
+// CollectionOf a fluent builder method for an array item
+func (h *Header) CollectionOf(items *Items, format string) *Header {
+ h.Type = "array"
+ h.Items = items
+ h.CollectionFormat = format
+ return h
+}
+
+// WithDefault sets the default value on this item
+func (h *Header) WithDefault(defaultValue interface{}) *Header {
+ h.Default = defaultValue
+ return h
+}
+
+// WithMaxLength sets a max length value
+func (h *Header) WithMaxLength(max int64) *Header {
+ h.MaxLength = &max
+ return h
+}
+
+// WithMinLength sets a min length value
+func (h *Header) WithMinLength(min int64) *Header {
+ h.MinLength = &min
+ return h
+}
+
+// WithPattern sets a pattern value
+func (h *Header) WithPattern(pattern string) *Header {
+ h.Pattern = pattern
+ return h
+}
+
+// WithMultipleOf sets a multiple of value
+func (h *Header) WithMultipleOf(number float64) *Header {
+ h.MultipleOf = &number
+ return h
+}
+
+// WithMaximum sets a maximum number value
+func (h *Header) WithMaximum(max float64, exclusive bool) *Header {
+ h.Maximum = &max
+ h.ExclusiveMaximum = exclusive
+ return h
+}
+
+// WithMinimum sets a minimum number value
+func (h *Header) WithMinimum(min float64, exclusive bool) *Header {
+ h.Minimum = &min
+ h.ExclusiveMinimum = exclusive
+ return h
+}
+
+// WithEnum sets the enum values (replacing any existing values)
+func (h *Header) WithEnum(values ...interface{}) *Header {
+ h.Enum = append([]interface{}{}, values...)
+ return h
+}
+
+// WithMaxItems sets the max items
+func (h *Header) WithMaxItems(size int64) *Header {
+ h.MaxItems = &size
+ return h
+}
+
+// WithMinItems sets the min items
+func (h *Header) WithMinItems(size int64) *Header {
+ h.MinItems = &size
+ return h
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (h *Header) UniqueValues() *Header {
+ h.UniqueItems = true
+ return h
+}
+
+// AllowDuplicates dictates that this array can have duplicate items
+func (h *Header) AllowDuplicates() *Header {
+ h.UniqueItems = false
+ return h
+}
+
+// MarshalJSON marshals this to JSON
+func (h Header) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(h.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(h.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(h.HeaderProps)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (h *Header) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &h.CommonValidations); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &h.HeaderProps); err != nil {
+ return err
+ }
+ return nil
+}
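+
+// A minimal sketch of the fluent builder above:
+//
+//   h := ResponseHeader().
+//       WithDescription("the number of requests remaining in the current window").
+//       Typed("integer", "int32")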
diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go
new file mode 100644
index 000000000..fb8b7c4ac
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/info.go
@@ -0,0 +1,168 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// Extensions vendor specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+ realKey := strings.ToLower(key)
+ e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ str, ok := v.(string)
+ return str, ok
+ }
+ return "", false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ str, ok := v.(bool)
+ return str, ok
+ }
+ return false, false
+}
+
+// GetStringSlice gets a string slice value from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ arr, ok := v.([]interface{})
+ if !ok {
+ return nil, false
+ }
+ var strs []string
+ for _, iface := range arr {
+ str, ok := iface.(string)
+ if !ok {
+ return nil, false
+ }
+ strs = append(strs, str)
+ }
+ return strs, ok
+ }
+ return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+ Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+ if value == nil {
+ return
+ }
+ if v.Extensions == nil {
+ v.Extensions = make(map[string]interface{})
+ }
+ v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+ toser := make(map[string]interface{})
+ for k, v := range v.Extensions {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ toser[k] = v
+ }
+ }
+ return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+ var d map[string]interface{}
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+ for k, vv := range d {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ if v.Extensions == nil {
+ v.Extensions = map[string]interface{}{}
+ }
+ v.Extensions[k] = vv
+ }
+ }
+ return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+ Description string `json:"description,omitempty"`
+ Title string `json:"title,omitempty"`
+ TermsOfService string `json:"termsOfService,omitempty"`
+ Contact *ContactInfo `json:"contact,omitempty"`
+ License *License `json:"license,omitempty"`
+ Version string `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
+//
+// For more information: http://goo.gl/8us55a#infoObject
+type Info struct {
+ VendorExtensible
+ InfoProps
+}
+
+// JSONLookup look up a value by the json property name
+func (i Info) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := i.Extensions[token]; ok {
+ return &ex, nil
+ }
+ r, _, err := jsonpointer.GetForToken(i.InfoProps, token)
+ return r, err
+}
+
+// MarshalJSON marshals this to JSON
+func (i Info) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(i.InfoProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(i.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (i *Info) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &i.InfoProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &i.VendorExtensible); err != nil {
+ return err
+ }
+ return nil
+}
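+
+// A minimal sketch; note that only "x-" prefixed extensions survive
+// serialization:
+//
+//   info := Info{InfoProps: InfoProps{Title: "Pet Store", Version: "1.0.0"}}
+//   info.AddExtension("x-audience", "public")
+//   b, _ := json.Marshal(info)
+//   // {"title":"Pet Store","version":"1.0.0","x-audience":"public"}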
diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go
new file mode 100644
index 000000000..4d57ea5ca
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/items.go
@@ -0,0 +1,199 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// SimpleSchema describes the schema shared by parameters, items and headers
+// that are not located in the request body.
+type SimpleSchema struct {
+ Type string `json:"type,omitempty"`
+ Format string `json:"format,omitempty"`
+ Items *Items `json:"items,omitempty"`
+ CollectionFormat string `json:"collectionFormat,omitempty"`
+ Default interface{} `json:"default,omitempty"`
+}
+
+// TypeName returns the type name for this simple schema, preferring
+// the format when one is set.
+func (s *SimpleSchema) TypeName() string {
+ if s.Format != "" {
+ return s.Format
+ }
+ return s.Type
+}
+
+// ItemsTypeName returns the type name of this schema's items, or the
+// empty string when there are no items.
+func (s *SimpleSchema) ItemsTypeName() string {
+ if s.Items == nil {
+ return ""
+ }
+ return s.Items.TypeName()
+}
+
+// CommonValidations describes the validation properties shared by
+// parameters, items and headers.
+type CommonValidations struct {
+ Maximum *float64 `json:"maximum,omitempty"`
+ ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"`
+ Minimum *float64 `json:"minimum,omitempty"`
+ ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"`
+ MaxLength *int64 `json:"maxLength,omitempty"`
+ MinLength *int64 `json:"minLength,omitempty"`
+ Pattern string `json:"pattern,omitempty"`
+ MaxItems *int64 `json:"maxItems,omitempty"`
+ MinItems *int64 `json:"minItems,omitempty"`
+ UniqueItems bool `json:"uniqueItems,omitempty"`
+ MultipleOf *float64 `json:"multipleOf,omitempty"`
+ Enum []interface{} `json:"enum,omitempty"`
+}
+
+// Items a limited subset of JSON-Schema's items object.
+// It is used by parameter definitions that are not located in "body".
+//
+// For more information: http://goo.gl/8us55a#items-object-
+type Items struct {
+ Refable
+ CommonValidations
+ SimpleSchema
+}
+
+// NewItems creates a new instance of items
+func NewItems() *Items {
+ return &Items{}
+}
+
+// Typed a fluent builder method for the type of item
+func (i *Items) Typed(tpe, format string) *Items {
+ i.Type = tpe
+ i.Format = format
+ return i
+}
+
+// CollectionOf a fluent builder method for an array item
+func (i *Items) CollectionOf(items *Items, format string) *Items {
+ i.Type = "array"
+ i.Items = items
+ i.CollectionFormat = format
+ return i
+}
+
+// WithDefault sets the default value on this item
+func (i *Items) WithDefault(defaultValue interface{}) *Items {
+ i.Default = defaultValue
+ return i
+}
+
+// WithMaxLength sets a max length value
+func (i *Items) WithMaxLength(max int64) *Items {
+ i.MaxLength = &max
+ return i
+}
+
+// WithMinLength sets a min length value
+func (i *Items) WithMinLength(min int64) *Items {
+ i.MinLength = &min
+ return i
+}
+
+// WithPattern sets a pattern value
+func (i *Items) WithPattern(pattern string) *Items {
+ i.Pattern = pattern
+ return i
+}
+
+// WithMultipleOf sets a multiple of value
+func (i *Items) WithMultipleOf(number float64) *Items {
+ i.MultipleOf = &number
+ return i
+}
+
+// WithMaximum sets a maximum number value
+func (i *Items) WithMaximum(max float64, exclusive bool) *Items {
+ i.Maximum = &max
+ i.ExclusiveMaximum = exclusive
+ return i
+}
+
+// WithMinimum sets a minimum number value
+func (i *Items) WithMinimum(min float64, exclusive bool) *Items {
+ i.Minimum = &min
+ i.ExclusiveMinimum = exclusive
+ return i
+}
+
+// WithEnum sets the enum values (replacing any existing values)
+func (i *Items) WithEnum(values ...interface{}) *Items {
+ i.Enum = append([]interface{}{}, values...)
+ return i
+}
+
+// WithMaxItems sets the max items
+func (i *Items) WithMaxItems(size int64) *Items {
+ i.MaxItems = &size
+ return i
+}
+
+// WithMinItems sets the min items
+func (i *Items) WithMinItems(size int64) *Items {
+ i.MinItems = &size
+ return i
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (i *Items) UniqueValues() *Items {
+ i.UniqueItems = true
+ return i
+}
+
+// AllowDuplicates dictates that this array can have duplicate items
+func (i *Items) AllowDuplicates() *Items {
+ i.UniqueItems = false
+ return i
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (i *Items) UnmarshalJSON(data []byte) error {
+ var validations CommonValidations
+ if err := json.Unmarshal(data, &validations); err != nil {
+ return err
+ }
+ var ref Refable
+ if err := json.Unmarshal(data, &ref); err != nil {
+ return err
+ }
+ var simpleSchema SimpleSchema
+ if err := json.Unmarshal(data, &simpleSchema); err != nil {
+ return err
+ }
+ i.Refable = ref
+ i.CommonValidations = validations
+ i.SimpleSchema = simpleSchema
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (i Items) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(i.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(i.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(i.Refable)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b3, b1, b2), nil
+}
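+
+// A minimal sketch of the fluent builder above: a CSV-formatted array of
+// UUID strings with at least one element:
+//
+//   items := NewItems().
+//       CollectionOf(NewItems().Typed("string", "uuid"), "csv").
+//       WithMinItems(1)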
diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go
new file mode 100644
index 000000000..f20961b4f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/license.go
@@ -0,0 +1,23 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// License information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#licenseObject
+type License struct {
+ Name string `json:"name,omitempty"`
+ URL string `json:"url,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go
new file mode 100644
index 000000000..de1db6f02
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/operation.go
@@ -0,0 +1,233 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// OperationProps describes the properties of an operation.
+type OperationProps struct {
+ Description string `json:"description,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss]
+ Tags []string `json:"tags,omitempty"`
+ Summary string `json:"summary,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+ ID string `json:"operationId,omitempty"`
+ Deprecated bool `json:"deprecated,omitempty"`
+ Security []map[string][]string `json:"security,omitempty"`
+ Parameters []Parameter `json:"parameters,omitempty"`
+ Responses *Responses `json:"responses,omitempty"`
+}
+
+// Operation describes a single API operation on a path.
+//
+// For more information: http://goo.gl/8us55a#operationObject
+type Operation struct {
+ VendorExtensible
+ OperationProps
+}
+
+// SuccessResponse gets a success response model
+func (o *Operation) SuccessResponse() (*Response, int, bool) {
+ if o.Responses == nil {
+ return nil, 0, false
+ }
+
+ for k, v := range o.Responses.StatusCodeResponses {
+ if k/100 == 2 {
+ return &v, k, true
+ }
+ }
+
+ return o.Responses.Default, 0, false
+}
+
+// JSONLookup look up a value by the json property name
+func (o Operation) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := o.Extensions[token]; ok {
+ return &ex, nil
+ }
+ r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
+ return r, err
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (o *Operation) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &o.OperationProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &o.VendorExtensible); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (o Operation) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(o.OperationProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(o.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ concatenated := swag.ConcatJSON(b1, b2)
+ return concatenated, nil
+}
+
+// NewOperation creates a new operation instance.
+// It expects an ID as parameter, but an empty ID is also valid.
+func NewOperation(id string) *Operation {
+ op := new(Operation)
+ op.ID = id
+ return op
+}
+
+// WithID sets the ID property on this operation, allows for chaining.
+func (o *Operation) WithID(id string) *Operation {
+ o.ID = id
+ return o
+}
+
+// WithDescription sets the description on this operation, allows for chaining
+func (o *Operation) WithDescription(description string) *Operation {
+ o.Description = description
+ return o
+}
+
+// WithSummary sets the summary on this operation, allows for chaining
+func (o *Operation) WithSummary(summary string) *Operation {
+ o.Summary = summary
+ return o
+}
+
+// WithExternalDocs sets/removes the external docs for/from this operation.
+// When both description and url are empty strings, the external docs are removed.
+// Otherwise the given values are set on the external docs object, so when you
+// pass a non-empty description you should also pass the url, and vice versa.
+func (o *Operation) WithExternalDocs(description, url string) *Operation {
+ if description == "" && url == "" {
+ o.ExternalDocs = nil
+ return o
+ }
+
+ if o.ExternalDocs == nil {
+ o.ExternalDocs = &ExternalDocumentation{}
+ }
+ o.ExternalDocs.Description = description
+ o.ExternalDocs.URL = url
+ return o
+}
+
+// Deprecate marks the operation as deprecated
+func (o *Operation) Deprecate() *Operation {
+ o.Deprecated = true
+ return o
+}
+
+// Undeprecate marks the operation as not deprecated
+func (o *Operation) Undeprecate() *Operation {
+ o.Deprecated = false
+ return o
+}
+
+// WithConsumes adds media types for incoming body values
+func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
+ o.Consumes = append(o.Consumes, mediaTypes...)
+ return o
+}
+
+// WithProduces adds media types for outgoing body values
+func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
+ o.Produces = append(o.Produces, mediaTypes...)
+ return o
+}
+
+// WithTags adds tags for this operation
+func (o *Operation) WithTags(tags ...string) *Operation {
+ o.Tags = append(o.Tags, tags...)
+ return o
+}
+
+// AddParam adds a parameter to this operation, when a parameter for that location
+// and with that name already exists it will be replaced
+func (o *Operation) AddParam(param *Parameter) *Operation {
+ if param == nil {
+ return o
+ }
+
+ for i, p := range o.Parameters {
+ if p.Name == param.Name && p.In == param.In {
+ params := append(o.Parameters[:i], *param)
+ params = append(params, o.Parameters[i+1:]...)
+ o.Parameters = params
+ return o
+ }
+ }
+
+ o.Parameters = append(o.Parameters, *param)
+ return o
+}
+
+// RemoveParam removes a parameter from the operation
+func (o *Operation) RemoveParam(name, in string) *Operation {
+ for i, p := range o.Parameters {
+ if p.Name == name && p.In == in {
+ o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
+ return o
+ }
+ }
+ return o
+}
+
+// SecuredWith adds a security scope to this operation.
+func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
+ o.Security = append(o.Security, map[string][]string{name: scopes})
+ return o
+}
+
+// WithDefaultResponse adds a default response to the operation.
+// Passing a nil value will remove the response
+func (o *Operation) WithDefaultResponse(response *Response) *Operation {
+ return o.RespondsWith(0, response)
+}
+
+// RespondsWith adds a status code response to the operation.
+// When the code is 0 the value of the response will be used as default response value.
+// When the value of the response is nil it will be removed from the operation
+func (o *Operation) RespondsWith(code int, response *Response) *Operation {
+ if o.Responses == nil {
+ o.Responses = new(Responses)
+ }
+ if code == 0 {
+ o.Responses.Default = response
+ return o
+ }
+ if response == nil {
+ delete(o.Responses.StatusCodeResponses, code)
+ return o
+ }
+ if o.Responses.StatusCodeResponses == nil {
+ o.Responses.StatusCodeResponses = make(map[int]Response)
+ }
+ o.Responses.StatusCodeResponses[code] = *response
+ return o
+}
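+
+// A minimal sketch of building an operation, assuming ok is a previously
+// constructed *Response:
+//
+//   op := NewOperation("listPets").
+//       WithSummary("lists the pets").
+//       WithProduces("application/json").
+//       WithTags("pets").
+//       RespondsWith(200, ok)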
diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go
new file mode 100644
index 000000000..8fb66d12a
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/parameter.go
@@ -0,0 +1,299 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// QueryParam creates a query parameter
+func QueryParam(name string) *Parameter {
+ return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}}
+}
+
+// HeaderParam creates a header parameter; it is required by default
+func HeaderParam(name string) *Parameter {
+ return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}}
+}
+
+// PathParam creates a path parameter; it is always required
+func PathParam(name string) *Parameter {
+ return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}}
+}
+
+// BodyParam creates a body parameter
+func BodyParam(name string, schema *Schema) *Parameter {
+ return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}}
+}
+
+// FormDataParam creates a formData parameter
+func FormDataParam(name string) *Parameter {
+ return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}}
+}
+
+// FileParam creates a formData parameter with the file type
+func FileParam(name string) *Parameter {
+ return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}}
+}
+
+// SimpleArrayParam creates a param for a simple array (string, int, date etc)
+func SimpleArrayParam(name, tpe, fmt string) *Parameter {
+ return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}}
+}
+
+// ParamRef creates a parameter that's a json reference
+func ParamRef(uri string) *Parameter {
+ p := new(Parameter)
+ p.Ref = MustCreateRef(uri)
+ return p
+}
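+
+// A minimal sketch of the constructors above, assuming petSchema is a
+// previously built *Schema:
+//
+//   limit := QueryParam("limit").Typed("integer", "int32").WithDefault(int64(20))
+//   id := PathParam("petId").Typed("string", "uuid")
+//   body := BodyParam("pet", petSchema).AsRequired()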
+
+// ParamProps describes the specific attributes of an operation parameter.
+type ParamProps struct {
+ Description string `json:"description,omitempty"`
+ Name string `json:"name,omitempty"`
+ In string `json:"in,omitempty"`
+ Required bool `json:"required,omitempty"`
+ Schema *Schema `json:"schema,omitempty"` // when in == "body"
+ AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData"
+}
+
+// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
+//
+// There are five possible parameter types.
+// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`.
+// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
+// * Header - Custom headers that are expected as part of the request.
+// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation.
+// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4):
+// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred.
+// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers.
+//
+// For more information: http://goo.gl/8us55a#parameterObject
+type Parameter struct {
+ Refable
+ CommonValidations
+ SimpleSchema
+ VendorExtensible
+ ParamProps
+}
+
+// JSONLookup look up a value by the json property name
+func (p Parameter) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := p.Extensions[token]; ok {
+ return &ex, nil
+ }
+ if token == "$ref" {
+ return &p.Ref, nil
+ }
+ r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
+ if err != nil {
+ return nil, err
+ }
+ if r != nil {
+ return r, nil
+ }
+ r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
+ if err != nil {
+ return nil, err
+ }
+ if r != nil {
+ return r, nil
+ }
+ r, _, err = jsonpointer.GetForToken(p.ParamProps, token)
+ return r, err
+}
+
+// WithDescription a fluent builder method for the description of the parameter
+func (p *Parameter) WithDescription(description string) *Parameter {
+ p.Description = description
+ return p
+}
+
+// Named a fluent builder method to override the name of the parameter
+func (p *Parameter) Named(name string) *Parameter {
+ p.Name = name
+ return p
+}
+
+// WithLocation a fluent builder method to override the location of the parameter
+func (p *Parameter) WithLocation(in string) *Parameter {
+ p.In = in
+ return p
+}
+
+// Typed a fluent builder method for the type of the parameter value
+func (p *Parameter) Typed(tpe, format string) *Parameter {
+ p.Type = tpe
+ p.Format = format
+ return p
+}
+
+// CollectionOf a fluent builder method for an array parameter
+func (p *Parameter) CollectionOf(items *Items, format string) *Parameter {
+ p.Type = "array"
+ p.Items = items
+ p.CollectionFormat = format
+ return p
+}
+
+// WithDefault sets the default value on this parameter
+func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter {
+ p.AsOptional() // with default implies optional
+ p.Default = defaultValue
+ return p
+}
+
+// AllowsEmptyValues flags this parameter as being ok with empty values
+func (p *Parameter) AllowsEmptyValues() *Parameter {
+ p.AllowEmptyValue = true
+ return p
+}
+
+// NoEmptyValues flags this parameter as not liking empty values
+func (p *Parameter) NoEmptyValues() *Parameter {
+ p.AllowEmptyValue = false
+ return p
+}
+
+// AsOptional flags this parameter as optional
+func (p *Parameter) AsOptional() *Parameter {
+ p.Required = false
+ return p
+}
+
+// AsRequired flags this parameter as required
+func (p *Parameter) AsRequired() *Parameter {
+ if p.Default != nil { // with a default required makes no sense
+ return p
+ }
+ p.Required = true
+ return p
+}
+
+// WithMaxLength sets a max length value
+func (p *Parameter) WithMaxLength(max int64) *Parameter {
+ p.MaxLength = &max
+ return p
+}
+
+// WithMinLength sets a min length value
+func (p *Parameter) WithMinLength(min int64) *Parameter {
+ p.MinLength = &min
+ return p
+}
+
+// WithPattern sets a pattern value
+func (p *Parameter) WithPattern(pattern string) *Parameter {
+ p.Pattern = pattern
+ return p
+}
+
+// WithMultipleOf sets a multiple of value
+func (p *Parameter) WithMultipleOf(number float64) *Parameter {
+ p.MultipleOf = &number
+ return p
+}
+
+// WithMaximum sets a maximum number value
+func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter {
+ p.Maximum = &max
+ p.ExclusiveMaximum = exclusive
+ return p
+}
+
+// WithMinimum sets a minimum number value
+func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter {
+ p.Minimum = &min
+ p.ExclusiveMinimum = exclusive
+ return p
+}
+
+// WithEnum sets the enum values (replacing any existing values)
+func (p *Parameter) WithEnum(values ...interface{}) *Parameter {
+ p.Enum = append([]interface{}{}, values...)
+ return p
+}
+
+// WithMaxItems sets the max items
+func (p *Parameter) WithMaxItems(size int64) *Parameter {
+ p.MaxItems = &size
+ return p
+}
+
+// WithMinItems sets the min items
+func (p *Parameter) WithMinItems(size int64) *Parameter {
+ p.MinItems = &size
+ return p
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (p *Parameter) UniqueValues() *Parameter {
+ p.UniqueItems = true
+ return p
+}
+
+// AllowDuplicates dictates that this array can have duplicate items
+func (p *Parameter) AllowDuplicates() *Parameter {
+ p.UniqueItems = false
+ return p
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (p *Parameter) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.Refable); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.ParamProps); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (p Parameter) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(p.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(p.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(p.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b4, err := json.Marshal(p.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ b5, err := json.Marshal(p.ParamProps)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go
new file mode 100644
index 000000000..9ab3ec538
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/path_item.go
@@ -0,0 +1,90 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// PathItemProps the path item specific properties
+type PathItemProps struct {
+ Get *Operation `json:"get,omitempty"`
+ Put *Operation `json:"put,omitempty"`
+ Post *Operation `json:"post,omitempty"`
+ Delete *Operation `json:"delete,omitempty"`
+ Options *Operation `json:"options,omitempty"`
+ Head *Operation `json:"head,omitempty"`
+ Patch *Operation `json:"patch,omitempty"`
+ Parameters []Parameter `json:"parameters,omitempty"`
+}
+
+// PathItem describes the operations available on a single path.
+// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+// The path itself is still exposed to the documentation viewer but they will
+// not know which operations and parameters are available.
+//
+// For more information: http://goo.gl/8us55a#pathItemObject
+type PathItem struct {
+ Refable
+ VendorExtensible
+ PathItemProps
+}
+
+// JSONLookup look up a value by the json property name
+func (p PathItem) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := p.Extensions[token]; ok {
+ return &ex, nil
+ }
+ if token == "$ref" {
+ return &p.Ref, nil
+ }
+ r, _, err := jsonpointer.GetForToken(p.PathItemProps, token)
+ return r, err
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (p *PathItem) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &p.Refable); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.PathItemProps); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (p PathItem) MarshalJSON() ([]byte, error) {
+ b3, err := json.Marshal(p.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b4, err := json.Marshal(p.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ b5, err := json.Marshal(p.PathItemProps)
+ if err != nil {
+ return nil, err
+ }
+ concatenated := swag.ConcatJSON(b3, b4, b5)
+ return concatenated, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go
new file mode 100644
index 000000000..9dc82a290
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/paths.go
@@ -0,0 +1,97 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// Paths holds the relative paths to the individual endpoints.
+// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order
+// to construct the full URL.
+// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+//
+// For more information: http://goo.gl/8us55a#pathsObject
+type Paths struct {
+ VendorExtensible
+ Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/"
+}
+
+// JSONLookup look up a value by the json property name
+func (p Paths) JSONLookup(token string) (interface{}, error) {
+ if pi, ok := p.Paths[token]; ok {
+ return &pi, nil
+ }
+ if ex, ok := p.Extensions[token]; ok {
+ return &ex, nil
+ }
+ return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (p *Paths) UnmarshalJSON(data []byte) error {
+ var res map[string]json.RawMessage
+ if err := json.Unmarshal(data, &res); err != nil {
+ return err
+ }
+ for k, v := range res {
+ if strings.HasPrefix(strings.ToLower(k), "x-") {
+ if p.Extensions == nil {
+ p.Extensions = make(map[string]interface{})
+ }
+ var d interface{}
+ if err := json.Unmarshal(v, &d); err != nil {
+ return err
+ }
+ p.Extensions[k] = d
+ }
+ if strings.HasPrefix(k, "/") {
+ if p.Paths == nil {
+ p.Paths = make(map[string]PathItem)
+ }
+ var pi PathItem
+ if err := json.Unmarshal(v, &pi); err != nil {
+ return err
+ }
+ p.Paths[k] = pi
+ }
+ }
+ return nil
+}
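+
+// The flattened wire format handled above looks like this; keys starting
+// with "/" become path items, "x-" keys become extensions, and any other
+// top-level key is ignored:
+//
+//   {
+//       "/pets": {"get": {"summary": "lists the pets"}},
+//       "x-framework": "go-swagger"
+//   }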
+
+// MarshalJSON converts this items object to JSON
+func (p Paths) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(p.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+
+ pths := make(map[string]PathItem)
+ for k, v := range p.Paths {
+ if strings.HasPrefix(k, "/") {
+ pths[k] = v
+ }
+ }
+ b2, err := json.Marshal(pths)
+ if err != nil {
+ return nil, err
+ }
+ concatenated := swag.ConcatJSON(b1, b2)
+ return concatenated, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go
new file mode 100644
index 000000000..68631df8b
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/ref.go
@@ -0,0 +1,167 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "net/http"
+ "os"
+ "path/filepath"
+
+ "github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+ Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+ return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from json
+func (r *Refable) UnmarshalJSON(d []byte) error {
+ return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+ jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref
+func (r *Ref) RemoteURI() string {
+ if r.String() == "" {
+ return r.String()
+ }
+
+ u := *r.GetURL()
+ u.Fragment = ""
+ return u.String()
+}
+
+// IsValidURI returns true when the url the ref points to can be found
+func (r *Ref) IsValidURI() bool {
+ if r.String() == "" {
+ return true
+ }
+
+ v := r.RemoteURI()
+ if v == "" {
+ return true
+ }
+
+ if r.HasFullURL {
+ rr, err := http.Get(v)
+ if err != nil {
+ return false
+ }
+
+ return rr.StatusCode/100 == 2
+ }
+
+ if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+ return false
+ }
+
+ // check for local file
+ pth := v
+ if r.HasURLPathOnly {
+ p, e := filepath.Abs(pth)
+ if e != nil {
+ return false
+ }
+ pth = p
+ }
+
+ fi, err := os.Stat(pth)
+ if err != nil {
+ return false
+ }
+
+ return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+ ref, err := r.Ref.Inherits(child.Ref)
+ if err != nil {
+ return nil, err
+ }
+ return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object;
+// it returns an error when the reference URI is invalid
+func NewRef(refURI string) (Ref, error) {
+ ref, err := jsonreference.New(refURI)
+ if err != nil {
+ return Ref{}, err
+ }
+ return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object, but panics when the reference URI is invalid
+func MustCreateRef(refURI string) Ref {
+ return Ref{Ref: jsonreference.MustCreateRef(refURI)}
+}
+
+// // NewResolvedRef creates a resolved ref
+// func NewResolvedRef(refURI string, data interface{}) Ref {
+// return Ref{
+// Ref: jsonreference.MustCreateRef(refURI),
+// Resolved: data,
+// }
+// }
+
+// MarshalJSON marshals this ref into a JSON object
+func (r Ref) MarshalJSON() ([]byte, error) {
+ str := r.String()
+ if str == "" {
+ if r.IsRoot() {
+ return []byte(`{"$ref":"#"}`), nil
+ }
+ return []byte("{}"), nil
+ }
+ v := map[string]interface{}{"$ref": str}
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this ref from a JSON object
+func (r *Ref) UnmarshalJSON(d []byte) error {
+ var v map[string]interface{}
+ if err := json.Unmarshal(d, &v); err != nil {
+ return err
+ }
+
+ if v == nil {
+ return nil
+ }
+
+ if vv, ok := v["$ref"]; ok {
+ if str, ok := vv.(string); ok {
+ ref, err := jsonreference.New(str)
+ if err != nil {
+ return err
+ }
+ *r = Ref{Ref: ref}
+ }
+ }
+
+ return nil
+}
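
A short editorial sketch of the Ref constructors (not part of the vendored file): NewRef reports an invalid URI as an error, while MustCreateRef panics instead. The URLs are made up.

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ref, err := spec.NewRef("http://example.com/schema.json#/definitions/Pet")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.RemoteURI()) // http://example.com/schema.json (fragment stripped)

	local := spec.MustCreateRef("#/definitions/Pet") // would panic on a malformed URI
	fmt.Println(local.String())
}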
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 000000000..308cc8478
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,113 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+ Description string `json:"description,omitempty"`
+ Schema *Schema `json:"schema,omitempty"`
+ Headers map[string]Header `json:"headers,omitempty"`
+ Examples map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
+//
+// For more information: http://goo.gl/8us55a#responseObject
+type Response struct {
+ Refable
+ ResponseProps
+}
+
+// UnmarshalJSON hydrates this response instance with the data from JSON
+func (r *Response) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &r.ResponseProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &r.Refable); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalJSON converts this response object to JSON
+func (r Response) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(r.ResponseProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(r.Refable)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// NewResponse creates a new response instance
+func NewResponse() *Response {
+ return new(Response)
+}
+
+// ResponseRef creates a response as a json reference
+func ResponseRef(url string) *Response {
+ resp := NewResponse()
+ resp.Ref = MustCreateRef(url)
+ return resp
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (r *Response) WithDescription(description string) *Response {
+ r.Description = description
+ return r
+}
+
+// WithSchema sets the schema on this response, allows for chaining.
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+ r.Schema = schema
+ return r
+}
+
+// AddHeader adds a header to this response
+func (r *Response) AddHeader(name string, header *Header) *Response {
+ if header == nil {
+ return r.RemoveHeader(name)
+ }
+ if r.Headers == nil {
+ r.Headers = make(map[string]Header)
+ }
+ r.Headers[name] = *header
+ return r
+}
+
+// RemoveHeader removes a header from this response
+func (r *Response) RemoveHeader(name string) *Response {
+ delete(r.Headers, name)
+ return r
+}
+
+// AddExample adds an example to this response
+func (r *Response) AddExample(mediaType string, example interface{}) *Response {
+ if r.Examples == nil {
+ r.Examples = make(map[string]interface{})
+ }
+ r.Examples[mediaType] = example
+ return r
+}
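
A minimal usage sketch (editorial, not vendored): the With*/Add* helpers return the receiver, so a response can be built fluently. The ref and example values are made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	resp := spec.NewResponse().
		WithDescription("A single pet").
		WithSchema(spec.RefSchema("#/definitions/Pet"))
	resp.AddExample("application/json", map[string]interface{}{"name": "rex"})

	b, _ := json.Marshal(resp)
	fmt.Println(string(b))
}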
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 000000000..ea071ca63
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,122 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// The documentation is not necessarily expected to cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected to cover a successful
+// operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not
+// covered individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+ VendorExtensible
+ ResponsesProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (r Responses) JSONLookup(token string) (interface{}, error) {
+ if token == "default" {
+ return r.Default, nil
+ }
+ if ex, ok := r.Extensions[token]; ok {
+ return &ex, nil
+ }
+ if i, err := strconv.Atoi(token); err == nil {
+ if scr, ok := r.StatusCodeResponses[i]; ok {
+ return &scr, nil
+ }
+ }
+ return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this responses instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+ return err
+ }
+ if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+ r.ResponsesProps = ResponsesProps{}
+ }
+ return nil
+}
+
+// MarshalJSON converts this responses object to JSON
+func (r Responses) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(r.ResponsesProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(r.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ concatenated := swag.ConcatJSON(b1, b2)
+ return concatenated, nil
+}
+
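+// ResponsesProps describes the default response and the responses keyed by status code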
+type ResponsesProps struct {
+ Default *Response
+ StatusCodeResponses map[int]Response
+}
+
+func (r ResponsesProps) MarshalJSON() ([]byte, error) {
+ toser := map[string]Response{}
+ if r.Default != nil {
+ toser["default"] = *r.Default
+ }
+ for k, v := range r.StatusCodeResponses {
+ toser[strconv.Itoa(k)] = v
+ }
+ return json.Marshal(toser)
+}
+
+func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
+ var res map[string]Response
+ if err := json.Unmarshal(data, &res); err != nil {
+ return err
+ }
+ if v, ok := res["default"]; ok {
+ r.Default = &v
+ delete(res, "default")
+ }
+ for k, v := range res {
+ if nk, err := strconv.Atoi(k); err == nil {
+ if r.StatusCodeResponses == nil {
+ r.StatusCodeResponses = map[int]Response{}
+ }
+ r.StatusCodeResponses[nk] = v
+ }
+ }
+ return nil
+}
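
For illustration (not part of the vendored file): the custom (un)marshalers above translate between JSON string keys and the typed Default/StatusCodeResponses fields. The sample document is made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{"200":{"description":"ok"},"default":{"description":"error"}}`)

	var r spec.Responses
	if err := json.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.StatusCodeResponses[200].Description) // ok
	fmt.Println(r.Default.Description)                  // error

	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // integer status codes are rendered back as string keys
}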
diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go
new file mode 100644
index 000000000..eb88f005c
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema.go
@@ -0,0 +1,628 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// BooleanProperty creates a boolean property
+func BooleanProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}}
+}
+
+// BoolProperty creates a boolean property
+func BoolProperty() *Schema { return BooleanProperty() }
+
+// StringProperty creates a string property
+func StringProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// CharProperty creates a string property
+func CharProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// Float64Property creates a float64/double property
+func Float64Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}}
+}
+
+// Float32Property creates a float32/float property
+func Float32Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}}
+}
+
+// Int8Property creates an int8 property
+func Int8Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}}
+}
+
+// Int16Property creates an int16 property
+func Int16Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}}
+}
+
+// Int32Property creates an int32 property
+func Int32Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}}
+}
+
+// Int64Property creates an int64 property
+func Int64Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}}
+}
+
+// StrFmtProperty creates a property for the named string format
+func StrFmtProperty(format string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}}
+}
+
+// DateProperty creates a date property
+func DateProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}}
+}
+
+// DateTimeProperty creates a date time property
+func DateTimeProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}}
+}
+
+// MapProperty creates a map property
+func MapProperty(property *Schema) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}}
+}
+
+// RefProperty creates a ref property
+func RefProperty(name string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// RefSchema creates a ref property
+func RefSchema(name string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// ArrayProperty creates an array property
+func ArrayProperty(items *Schema) *Schema {
+ if items == nil {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}}
+ }
+ return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}}
+}
+
+// ComposedSchema creates a schema with allOf
+func ComposedSchema(schemas ...Schema) *Schema {
+ s := new(Schema)
+ s.AllOf = schemas
+ return s
+}
+
+// SchemaURL represents a schema url
+type SchemaURL string
+
+// MarshalJSON marshals this to JSON
+func (r SchemaURL) MarshalJSON() ([]byte, error) {
+ if r == "" {
+ return []byte("{}"), nil
+ }
+ v := map[string]interface{}{"$schema": string(r)}
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (r *SchemaURL) UnmarshalJSON(data []byte) error {
+ var v map[string]interface{}
+ if err := json.Unmarshal(data, &v); err != nil {
+ return err
+ }
+ if v == nil {
+ return nil
+ }
+ if vv, ok := v["$schema"]; ok {
+ if str, ok := vv.(string); ok {
+ u, err := url.Parse(str)
+ if err != nil {
+ return err
+ }
+
+ *r = SchemaURL(u.String())
+ }
+ }
+ return nil
+}
+
+// type ExtraSchemaProps map[string]interface{}
+
+// // JSONSchema represents a structure that is a json schema draft 04
+// type JSONSchema struct {
+// SchemaProps
+// ExtraSchemaProps
+// }
+
+// // MarshalJSON marshal this to JSON
+// func (s JSONSchema) MarshalJSON() ([]byte, error) {
+// b1, err := json.Marshal(s.SchemaProps)
+// if err != nil {
+// return nil, err
+// }
+// b2, err := s.Ref.MarshalJSON()
+// if err != nil {
+// return nil, err
+// }
+// b3, err := s.Schema.MarshalJSON()
+// if err != nil {
+// return nil, err
+// }
+// b4, err := json.Marshal(s.ExtraSchemaProps)
+// if err != nil {
+// return nil, err
+// }
+// return swag.ConcatJSON(b1, b2, b3, b4), nil
+// }
+
+// // UnmarshalJSON marshal this from JSON
+// func (s *JSONSchema) UnmarshalJSON(data []byte) error {
+// var sch JSONSchema
+// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil {
+// return err
+// }
+// if err := json.Unmarshal(data, &sch.Ref); err != nil {
+// return err
+// }
+// if err := json.Unmarshal(data, &sch.Schema); err != nil {
+// return err
+// }
+// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil {
+// return err
+// }
+// *s = sch
+// return nil
+// }
+
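+// SchemaProps lists the properties of a schema, as defined by JSON Schema draft 4 and the swagger spec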
+type SchemaProps struct {
+ ID string `json:"id,omitempty"`
+ Ref Ref `json:"-"`
+ Schema SchemaURL `json:"-"`
+ Description string `json:"description,omitempty"`
+ Type StringOrArray `json:"type,omitempty"`
+ Format string `json:"format,omitempty"`
+ Title string `json:"title,omitempty"`
+ Default interface{} `json:"default,omitempty"`
+ Maximum *float64 `json:"maximum,omitempty"`
+ ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"`
+ Minimum *float64 `json:"minimum,omitempty"`
+ ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"`
+ MaxLength *int64 `json:"maxLength,omitempty"`
+ MinLength *int64 `json:"minLength,omitempty"`
+ Pattern string `json:"pattern,omitempty"`
+ MaxItems *int64 `json:"maxItems,omitempty"`
+ MinItems *int64 `json:"minItems,omitempty"`
+ UniqueItems bool `json:"uniqueItems,omitempty"`
+ MultipleOf *float64 `json:"multipleOf,omitempty"`
+ Enum []interface{} `json:"enum,omitempty"`
+ MaxProperties *int64 `json:"maxProperties,omitempty"`
+ MinProperties *int64 `json:"minProperties,omitempty"`
+ Required []string `json:"required,omitempty"`
+ Items *SchemaOrArray `json:"items,omitempty"`
+ AllOf []Schema `json:"allOf,omitempty"`
+ OneOf []Schema `json:"oneOf,omitempty"`
+ AnyOf []Schema `json:"anyOf,omitempty"`
+ Not *Schema `json:"not,omitempty"`
+ Properties map[string]Schema `json:"properties,omitempty"`
+ AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"`
+ PatternProperties map[string]Schema `json:"patternProperties,omitempty"`
+ Dependencies Dependencies `json:"dependencies,omitempty"`
+ AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"`
+ Definitions Definitions `json:"definitions,omitempty"`
+}
+
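+// SwaggerSchemaProps are the additional schema properties introduced by the swagger spec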
+type SwaggerSchemaProps struct {
+ Discriminator string `json:"discriminator,omitempty"`
+ ReadOnly bool `json:"readOnly,omitempty"`
+ XML *XMLObject `json:"xml,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+ Example interface{} `json:"example,omitempty"`
+}
+
+// Schema the schema object allows the definition of input and output data types.
+// These types can be objects, but also primitives and arrays.
+// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/)
+// and uses a predefined subset of it.
+// On top of this subset, there are extensions provided by this specification to allow for more complete documentation.
+//
+// For more information: http://goo.gl/8us55a#schemaObject
+type Schema struct {
+ VendorExtensible
+ SchemaProps
+ SwaggerSchemaProps
+ ExtraProps map[string]interface{} `json:"-"`
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s Schema) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := s.Extensions[token]; ok {
+ return &ex, nil
+ }
+
+ if ex, ok := s.ExtraProps[token]; ok {
+ return &ex, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(s.SchemaProps, token)
+ if r != nil || err != nil {
+ return r, err
+ }
+ r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token)
+ return r, err
+}
+
+// WithID sets the id for this schema, allows for chaining
+func (s *Schema) WithID(id string) *Schema {
+ s.ID = id
+ return s
+}
+
+// WithTitle sets the title for this schema, allows for chaining
+func (s *Schema) WithTitle(title string) *Schema {
+ s.Title = title
+ return s
+}
+
+// WithDescription sets the description for this schema, allows for chaining
+func (s *Schema) WithDescription(description string) *Schema {
+ s.Description = description
+ return s
+}
+
+// WithProperties sets the properties for this schema
+func (s *Schema) WithProperties(schemas map[string]Schema) *Schema {
+ s.Properties = schemas
+ return s
+}
+
+// SetProperty sets a property on this schema
+func (s *Schema) SetProperty(name string, schema Schema) *Schema {
+ if s.Properties == nil {
+ s.Properties = make(map[string]Schema)
+ }
+ s.Properties[name] = schema
+ return s
+}
+
+// WithAllOf sets the all of property
+func (s *Schema) WithAllOf(schemas ...Schema) *Schema {
+ s.AllOf = schemas
+ return s
+}
+
+// WithMaxProperties sets the max number of properties an object can have
+func (s *Schema) WithMaxProperties(max int64) *Schema {
+ s.MaxProperties = &max
+ return s
+}
+
+// WithMinProperties sets the min number of properties an object must have
+func (s *Schema) WithMinProperties(min int64) *Schema {
+ s.MinProperties = &min
+ return s
+}
+
+// Typed sets the type of this schema for a single value item
+func (s *Schema) Typed(tpe, format string) *Schema {
+ s.Type = []string{tpe}
+ s.Format = format
+ return s
+}
+
+// AddType adds a type with potential format to the types for this schema
+func (s *Schema) AddType(tpe, format string) *Schema {
+ s.Type = append(s.Type, tpe)
+ if format != "" {
+ s.Format = format
+ }
+ return s
+}
+
+// CollectionOf is a fluent builder method for an array parameter
+func (s *Schema) CollectionOf(items Schema) *Schema {
+ s.Type = []string{"array"}
+ s.Items = &SchemaOrArray{Schema: &items}
+ return s
+}
+
+// WithDefault sets the default value on this parameter
+func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
+ s.Default = defaultValue
+ return s
+}
+
+// WithRequired flags this parameter as required
+func (s *Schema) WithRequired(items ...string) *Schema {
+ s.Required = items
+ return s
+}
+
+// AddRequired adds field names to the required properties array
+func (s *Schema) AddRequired(items ...string) *Schema {
+ s.Required = append(s.Required, items...)
+ return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+ s.MaxLength = &max
+ return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+ s.MinLength = &min
+ return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+ s.Pattern = pattern
+ return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+ s.MultipleOf = &number
+ return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+ s.Maximum = &max
+ s.ExclusiveMaximum = exclusive
+ return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+ s.Minimum = &min
+ s.ExclusiveMinimum = exclusive
+ return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+ s.Enum = append([]interface{}{}, values...)
+ return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+ s.MaxItems = &size
+ return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+ s.MinItems = &size
+ return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+ s.UniqueItems = true
+ return s
+}
+
+// AllowDuplicates this array can have duplicates
+func (s *Schema) AllowDuplicates() *Schema {
+ s.UniqueItems = false
+ return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+ s.AllOf = append(s.AllOf, schemas...)
+ return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+ s.Discriminator = discriminator
+ return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+ s.ReadOnly = true
+ return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+ s.ReadOnly = false
+ return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+ s.Example = example
+ return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params the external documents will be removed.
+// When you pass a non-empty string for either value, both values are set on the external docs object;
+// so when you pass a non-empty description, you should also pass the url, and vice versa.
+func (s *Schema) WithExternalDocs(description, url string) *Schema {
+ if description == "" && url == "" {
+ s.ExternalDocs = nil
+ return s
+ }
+
+ if s.ExternalDocs == nil {
+ s.ExternalDocs = &ExternalDocumentation{}
+ }
+ s.ExternalDocs.Description = description
+ s.ExternalDocs.URL = url
+ return s
+}
+
+// WithXMLName sets the xml name for the object
+func (s *Schema) WithXMLName(name string) *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Name = name
+ return s
+}
+
+// WithXMLNamespace sets the xml namespace for the object
+func (s *Schema) WithXMLNamespace(namespace string) *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Namespace = namespace
+ return s
+}
+
+// WithXMLPrefix sets the xml prefix for the object
+func (s *Schema) WithXMLPrefix(prefix string) *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Prefix = prefix
+ return s
+}
+
+// AsXMLAttribute flags this object as xml attribute
+func (s *Schema) AsXMLAttribute() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Attribute = true
+ return s
+}
+
+// AsXMLElement flags this object as an xml node
+func (s *Schema) AsXMLElement() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Attribute = false
+ return s
+}
+
+// AsWrappedXML flags this object as wrapped; this is mostly useful for array types
+func (s *Schema) AsWrappedXML() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Wrapped = true
+ return s
+}
+
+// AsUnwrappedXML removes the wrapped flag from this object
+func (s *Schema) AsUnwrappedXML() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Wrapped = false
+ return s
+}
+
+// MarshalJSON marshals this schema to JSON
+func (s Schema) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SchemaProps)
+ if err != nil {
+ return nil, fmt.Errorf("schema props %v", err)
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, fmt.Errorf("vendor props %v", err)
+ }
+ b3, err := s.Ref.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("ref prop %v", err)
+ }
+ b4, err := s.Schema.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("schema prop %v", err)
+ }
+ b5, err := json.Marshal(s.SwaggerSchemaProps)
+ if err != nil {
+ return nil, fmt.Errorf("common validations %v", err)
+ }
+ var b6 []byte
+ if s.ExtraProps != nil {
+ jj, err := json.Marshal(s.ExtraProps)
+ if err != nil {
+ return nil, fmt.Errorf("extra props %v", err)
+ }
+ b6 = jj
+ }
+ return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshals this schema from JSON
+func (s *Schema) UnmarshalJSON(data []byte) error {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch.SchemaProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &sch.Ref); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &sch.Schema); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &sch.SwaggerSchemaProps); err != nil {
+ return err
+ }
+
+ var d map[string]interface{}
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+
+ delete(d, "$ref")
+ delete(d, "$schema")
+ for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
+ delete(d, pn)
+ }
+
+ for k, vv := range d {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ if sch.Extensions == nil {
+ sch.Extensions = map[string]interface{}{}
+ }
+ sch.Extensions[k] = vv
+ continue
+ }
+ if sch.ExtraProps == nil {
+ sch.ExtraProps = map[string]interface{}{}
+ }
+ sch.ExtraProps[k] = vv
+ }
+
+ *s = sch
+
+ return nil
+}
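
An editorial sketch of the fluent builders above (not part of the vendored file); the property names are made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	pet := new(spec.Schema).
		Typed("object", "").
		WithTitle("Pet").
		SetProperty("name", *spec.StringProperty().WithMinLength(1)).
		SetProperty("age", *spec.Int32Property()).
		WithRequired("name")

	b, _ := json.MarshalIndent(pet, "", "  ")
	fmt.Println(string(b))
}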
diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go
new file mode 100644
index 000000000..22d4f10af
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/security_scheme.go
@@ -0,0 +1,142 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+const (
+ basic = "basic"
+ apiKey = "apiKey"
+ oauth2 = "oauth2"
+ implicit = "implicit"
+ password = "password"
+ application = "application"
+ accessCode = "accessCode"
+)
+
+// BasicAuth creates a basic auth security scheme
+func BasicAuth() *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}}
+}
+
+// APIKeyAuth creates an api key auth security scheme
+func APIKeyAuth(fieldName, valueSource string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}}
+}
+
+// OAuth2Implicit creates an implicit flow oauth2 security scheme
+func OAuth2Implicit(authorizationURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: implicit,
+ AuthorizationURL: authorizationURL,
+ }}
+}
+
+// OAuth2Password creates a password flow oauth2 security scheme
+func OAuth2Password(tokenURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: password,
+ TokenURL: tokenURL,
+ }}
+}
+
+// OAuth2Application creates an application flow oauth2 security scheme
+func OAuth2Application(tokenURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: application,
+ TokenURL: tokenURL,
+ }}
+}
+
+// OAuth2AccessToken creates an access token flow oauth2 security scheme
+func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: accessCode,
+ AuthorizationURL: authorizationURL,
+ TokenURL: tokenURL,
+ }}
+}
+
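+// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section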
+type SecuritySchemeProps struct {
+ Description string `json:"description,omitempty"`
+ Type string `json:"type"`
+ Name string `json:"name,omitempty"` // api key
+ In string `json:"in,omitempty"` // api key
+ Flow string `json:"flow,omitempty"` // oauth2
+ AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2
+ TokenURL string `json:"tokenUrl,omitempty"` // oauth2
+ Scopes map[string]string `json:"scopes,omitempty"` // oauth2
+}
+
+// AddScope adds a scope to this security scheme
+func (s *SecuritySchemeProps) AddScope(scope, description string) {
+ if s.Scopes == nil {
+ s.Scopes = make(map[string]string)
+ }
+ s.Scopes[scope] = description
+}
+
+// SecurityScheme allows the definition of a security scheme that can be used by the operations.
+// Supported schemes are basic authentication, an API key (either as a header or as a query parameter)
+// and OAuth2's common flows (implicit, password, application and access code).
+//
+// For more information: http://goo.gl/8us55a#securitySchemeObject
+type SecurityScheme struct {
+ VendorExtensible
+ SecuritySchemeProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SecurityScheme) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := s.Extensions[token]; ok {
+ return &ex, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token)
+ return r, err
+}
+
+// MarshalJSON marshals this security scheme to JSON
+func (s SecurityScheme) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SecuritySchemeProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this security scheme from JSON
+func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &s.VendorExtensible); err != nil {
+ return err
+ }
+ return nil
+}
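
A minimal sketch (editorial) of the factory helpers and AddScope above; the endpoint URL and scope are made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	scheme := spec.OAuth2Implicit("https://auth.example.com/authorize")
	scheme.AddScope("read:pets", "read access to pets")

	b, _ := json.Marshal(scheme)
	fmt.Println(string(b)) // type, flow, authorizationUrl and scopes in one flat object
}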
diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go
new file mode 100644
index 000000000..cc2ae56b2
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/spec.go
@@ -0,0 +1,79 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import "encoding/json"
+
+//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+ // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+ SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+ // JSONSchemaURL the url for the json schema schema
+ JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+var (
+ jsonSchema = MustLoadJSONSchemaDraft04()
+ swaggerSchema = MustLoadSwagger20Schema()
+)
+
+// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+ d, e := JSONSchemaDraft04()
+ if e != nil {
+ panic(e)
+ }
+ return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for json schema draft04
+func JSONSchemaDraft04() (*Schema, error) {
+ b, err := Asset("jsonschema-draft-04.json")
+ if err != nil {
+ return nil, err
+ }
+
+ schema := new(Schema)
+ if err := json.Unmarshal(b, schema); err != nil {
+ return nil, err
+ }
+ return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+ d, e := Swagger20Schema()
+ if e != nil {
+ panic(e)
+ }
+ return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+
+ b, err := Asset("v2/schema.json")
+ if err != nil {
+ return nil, err
+ }
+
+ schema := new(Schema)
+ if err := json.Unmarshal(b, schema); err != nil {
+ return nil, err
+ }
+ return schema, nil
+}
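
For illustration (not vendored): the loaders read schemas embedded by the go:generate/go-bindata step above, so they need no network access, and the Must* variants panic instead of returning an error.

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	draft04 := spec.MustLoadJSONSchemaDraft04() // panics if the embedded asset is missing or invalid
	swagger := spec.MustLoadSwagger20Schema()

	fmt.Println(draft04.ID)
	fmt.Println(swagger.ID)
}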
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 000000000..ff3ef875e
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,317 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// Swagger is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document.
+//
+// For more information: http://goo.gl/8us55a#swagger-object-
+type Swagger struct {
+ VendorExtensible
+ SwaggerProps
+}
+
+// JSONLookup looks up a value by the json property name
+func (s Swagger) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := s.Extensions[token]; ok {
+ return &ex, nil
+ }
+ r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token)
+ return r, err
+}
+
+// MarshalJSON marshals this swagger structure to json
+func (s Swagger) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SwaggerProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals a swagger spec from json
+func (s *Swagger) UnmarshalJSON(data []byte) error {
+ var sw Swagger
+ if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil {
+ return err
+ }
+ *s = sw
+ return nil
+}
+
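+// SwaggerProps captures the top-level properties of a swagger specification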
+type SwaggerProps struct {
+ ID string `json:"id,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss]
+ Swagger string `json:"swagger,omitempty"`
+ Info *Info `json:"info,omitempty"`
+ Host string `json:"host,omitempty"`
+ BasePath string `json:"basePath,omitempty"` // must start with a leading "/"
+ Paths *Paths `json:"paths"` // required
+ Definitions Definitions `json:"definitions"`
+ Parameters map[string]Parameter `json:"parameters,omitempty"`
+ Responses map[string]Response `json:"responses,omitempty"`
+ SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"`
+ Security []map[string][]string `json:"security,omitempty"`
+ Tags []Tag `json:"tags,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+// Dependencies represent a dependencies property
+type Dependencies map[string]SchemaOrStringArray
+
+// SchemaOrBool represents a schema or boolean value; it is biased towards true for the boolean property
+type SchemaOrBool struct {
+ Allows bool
+ Schema *Schema
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) {
+ if token == "allows" {
+ return s.Allows, nil
+ }
+ r, _, err := jsonpointer.GetForToken(s.Schema, token)
+ return r, err
+}
+
+var jsTrue = []byte("true")
+var jsFalse = []byte("false")
+
+// MarshalJSON convert this object to JSON
+func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
+ if s.Schema != nil {
+ return json.Marshal(s.Schema)
+ }
+
+ if s.Schema == nil && !s.Allows {
+ return jsFalse, nil
+ }
+ return jsTrue, nil
+}
+
+// UnmarshalJSON converts this bool or schema object from a JSON structure
+func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
+ var nw SchemaOrBool
+ if len(data) >= 4 {
+ if data[0] == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ nw.Allows = !(len(data) == 5 && data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') // length check guards the data[4] index against short input such as "true"
+ }
+ *s = nw
+ return nil
+}
+
+// SchemaOrStringArray represents a schema or a string array
+type SchemaOrStringArray struct {
+ Schema *Schema
+ Property []string
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) {
+ r, _, err := jsonpointer.GetForToken(s.Schema, token)
+ return r, err
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
+ if len(s.Property) > 0 {
+ return json.Marshal(s.Property)
+ }
+ if s.Schema != nil {
+ return json.Marshal(s.Schema)
+ }
+ return []byte("null"), nil // returning nil bytes would make encoding/json fail; emit JSON null instead
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+ var nw SchemaOrStringArray
+ if first == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ if first == '[' {
+ if err := json.Unmarshal(data, &nw.Property); err != nil {
+ return err
+ }
+ }
+ *s = nw
+ return nil
+}
+
+// Definitions contains the models explicitly defined in this spec
+// An object to hold data types that can be consumed and produced by operations.
+// These data types can be primitives, arrays or models.
+//
+// For more information: http://goo.gl/8us55a#definitionsObject
+type Definitions map[string]Schema
+
+// SecurityDefinitions a declaration of the security schemes available to be used in the specification.
+// This does not enforce the security schemes on the operations and only serves to provide
+// the relevant details for each scheme.
+//
+// For more information: http://goo.gl/8us55a#securityDefinitionsObject
+type SecurityDefinitions map[string]*SecurityScheme
+
+// StringOrArray represents a value that can either be a string
+// or an array of strings. Mainly here for serialization purposes
+type StringOrArray []string
+
+// Contains returns true when the value is contained in the slice
+func (s StringOrArray) Contains(value string) bool {
+ for _, str := range s {
+ if str == value {
+ return true
+ }
+ }
+ return false
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) {
+ if _, err := strconv.Atoi(token); err == nil {
+ r, _, err := jsonpointer.GetForToken(s.Schemas, token)
+ return r, err
+ }
+ r, _, err := jsonpointer.GetForToken(s.Schema, token)
+ return r, err
+}
+
+// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string
+func (s *StringOrArray) UnmarshalJSON(data []byte) error {
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+
+ if first == '[' {
+ var parsed []string
+ if err := json.Unmarshal(data, &parsed); err != nil {
+ return err
+ }
+ *s = StringOrArray(parsed)
+ return nil
+ }
+
+ var single interface{}
+ if err := json.Unmarshal(data, &single); err != nil {
+ return err
+ }
+ if single == nil {
+ return nil
+ }
+ switch v := single.(type) {
+ case string:
+ *s = StringOrArray([]string{v})
+ return nil
+ default:
+ return fmt.Errorf("only string or array is allowed, not %T", single)
+ }
+}
+
+// MarshalJSON converts this string or array to a JSON array or JSON string
+func (s StringOrArray) MarshalJSON() ([]byte, error) {
+ if len(s) == 1 {
+ return json.Marshal([]string(s)[0])
+ }
+ return json.Marshal([]string(s))
+}
+
+// SchemaOrArray represents a value that can either be a Schema
+// or an array of Schema. Mainly here for serialization purposes
+type SchemaOrArray struct {
+ Schema *Schema
+ Schemas []Schema
+}
+
+// Len returns the number of schemas in this property
+func (s SchemaOrArray) Len() int {
+ if s.Schema != nil {
+ return 1
+ }
+ return len(s.Schemas)
+}
+
+// ContainsType returns true when one of the schemas is of the specified type
+func (s *SchemaOrArray) ContainsType(name string) bool {
+ if s.Schema != nil {
+ return s.Schema.Type != nil && s.Schema.Type.Contains(name)
+ }
+ return false
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrArray) MarshalJSON() ([]byte, error) {
+ if len(s.Schemas) > 0 {
+ return json.Marshal(s.Schemas)
+ }
+ return json.Marshal(s.Schema)
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrArray) UnmarshalJSON(data []byte) error {
+ var nw SchemaOrArray
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+ if first == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ if first == '[' {
+ if err := json.Unmarshal(data, &nw.Schemas); err != nil {
+ return err
+ }
+ }
+ *s = nw
+ return nil
+}
+
+// vim:set ft=go noet sts=2 sw=2 ts=2:
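
An editorial sketch of the polymorphic JSON helpers defined above (not part of the vendored file): StringOrArray accepts a bare string or an array, and SchemaOrBool accepts a boolean or a schema object.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	var t spec.StringOrArray
	_ = json.Unmarshal([]byte(`"string"`), &t)
	fmt.Println(t.Contains("string")) // true: a bare JSON string becomes a one-element slice

	_ = json.Unmarshal([]byte(`["string","null"]`), &t)
	fmt.Println(len(t)) // 2: a JSON array is taken as-is

	var ap spec.SchemaOrBool
	_ = json.Unmarshal([]byte(`false`), &ap)
	fmt.Println(ap.Allows) // false

	_ = json.Unmarshal([]byte(`{"type":"string"}`), &ap)
	fmt.Println(ap.Allows, ap.Schema != nil) // true true
}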
diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go
new file mode 100644
index 000000000..97f555840
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/tag.go
@@ -0,0 +1,73 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
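+// TagProps describes a tag entry in the top-level tags section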
+type TagProps struct {
+ Description string `json:"description,omitempty"`
+ Name string `json:"name,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+// NewTag creates a new tag
+func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag {
+ return Tag{TagProps: TagProps{description, name, externalDocs}}
+}
+
+// Tag allows adding metadata to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject).
+// It is not mandatory to have a Tag Object per tag used there.
+//
+// For more information: http://goo.gl/8us55a#tagObject
+type Tag struct {
+ VendorExtensible
+ TagProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (t Tag) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := t.Extensions[token]; ok {
+ return &ex, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(t.TagProps, token)
+ return r, err
+}
+
+// MarshalJSON marshals this tag to JSON
+func (t Tag) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(t.TagProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(t.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this tag from JSON
+func (t *Tag) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &t.TagProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &t.VendorExtensible)
+}
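
A one-line sketch (editorial, not vendored): NewTag fills TagProps positionally, description first.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	tag := spec.NewTag("pets", "Operations about pets", nil)
	b, _ := json.Marshal(tag)
	fmt.Println(string(b)) // {"description":"Operations about pets","name":"pets"}
}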
diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go
new file mode 100644
index 000000000..945a46703
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/xml_object.go
@@ -0,0 +1,68 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// XMLObject a metadata object that allows for more fine-tuned XML model definitions.
+//
+// For more information: http://goo.gl/8us55a#xmlObject
+type XMLObject struct {
+ Name string `json:"name,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Prefix string `json:"prefix,omitempty"`
+ Attribute bool `json:"attribute,omitempty"`
+ Wrapped bool `json:"wrapped,omitempty"`
+}
+
+// WithName sets the xml name for the object
+func (x *XMLObject) WithName(name string) *XMLObject {
+ x.Name = name
+ return x
+}
+
+// WithNamespace sets the xml namespace for the object
+func (x *XMLObject) WithNamespace(namespace string) *XMLObject {
+ x.Namespace = namespace
+ return x
+}
+
+// WithPrefix sets the xml prefix for the object
+func (x *XMLObject) WithPrefix(prefix string) *XMLObject {
+ x.Prefix = prefix
+ return x
+}
+
+// AsAttribute flags this object as xml attribute
+func (x *XMLObject) AsAttribute() *XMLObject {
+ x.Attribute = true
+ return x
+}
+
+// AsElement flags this object as an xml node
+func (x *XMLObject) AsElement() *XMLObject {
+ x.Attribute = false
+ return x
+}
+
+// AsWrapped flags this object as wrapped; this is mostly useful for array types
+func (x *XMLObject) AsWrapped() *XMLObject {
+ x.Wrapped = true
+ return x
+}
+
+// AsUnwrapped removes the wrapped flag from this object
+func (x *XMLObject) AsUnwrapped() *XMLObject {
+ x.Wrapped = false
+ return x
+}
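
A minimal sketch (editorial) of the fluent XMLObject helpers; the names are made up.

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	x := new(spec.XMLObject).
		WithName("animal").
		WithNamespace("http://example.com/schema").
		WithPrefix("ex").
		AsWrapped()

	fmt.Printf("%+v\n", *x)
}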
diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
new file mode 100644
index 000000000..c1d3c196c
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -0,0 +1,12 @@
+# Swag [![Build Status](https://ci.vmware.run/api/badges/go-openapi/swag/status.svg)](https://ci.vmware.run/go-openapi/swag) [![Coverage](https://coverage.vmware.run/badges/go-openapi/swag/coverage.svg)](https://coverage.vmware.run/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag)
+
+Contains a bunch of helper functions:
+
+* convert between values and pointers for builtins
+* convert from string to builtin
+* fast JSON concatenation
+* search in path
+* load from file or http
+* name mangling
\ No newline at end of file
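As a quick orientation to the API this diff vendors in, here is a minimal usage sketch; it assumes the import path `github.com/go-openapi/swag` and only calls functions defined in the files below:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Pointer/value conversions for builtins.
	name := swag.String("swagger")
	fmt.Println(swag.StringValue(name)) // swagger

	// Lenient string-to-bool conversion: "yes", "on", "1", ... all count.
	ok, _ := swag.ConvertBool("yes")
	fmt.Println(ok) // true

	// Name mangling for code generation.
	fmt.Println(swag.ToGoName("sample_http_id")) // SampleHTTPID
}
```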
diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go
new file mode 100644
index 000000000..28d912410
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/convert.go
@@ -0,0 +1,188 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "math"
+ "strconv"
+ "strings"
+)
+
+// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
+const (
+ maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1
+ minJSONFloat = -float64(1<<53 - 1) // -9007199254740991.0 -(2^53 - 1)
+)
+
+// IsFloat64AJSONInteger allows for integers in [-(2^53 - 1), 2^53 - 1], inclusive
+func IsFloat64AJSONInteger(f float64) bool {
+ if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat {
+ return false
+ }
+
+ return f == float64(int64(f)) || f == float64(uint64(f))
+}
+
+var evaluatesAsTrue = map[string]struct{}{
+ "true":     {},
+ "1":        {},
+ "yes":      {},
+ "ok":       {},
+ "y":        {},
+ "on":       {},
+ "selected": {},
+ "checked":  {},
+ "t":        {},
+ "enabled":  {},
+}
+
+// ConvertBool turns a string into a boolean
+func ConvertBool(str string) (bool, error) {
+ _, ok := evaluatesAsTrue[strings.ToLower(str)]
+ return ok, nil
+}
+
+// ConvertFloat32 turns a string into a float32
+func ConvertFloat32(str string) (float32, error) {
+ f, err := strconv.ParseFloat(str, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+// ConvertFloat64 turns a string into a float64
+func ConvertFloat64(str string) (float64, error) {
+ return strconv.ParseFloat(str, 64)
+}
+
+// ConvertInt8 turns a string into an int8
+func ConvertInt8(str string) (int8, error) {
+ i, err := strconv.ParseInt(str, 10, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(i), nil
+}
+
+// ConvertInt16 turns a string into an int16
+func ConvertInt16(str string) (int16, error) {
+ i, err := strconv.ParseInt(str, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(i), nil
+}
+
+// ConvertInt32 turns a string into an int32
+func ConvertInt32(str string) (int32, error) {
+ i, err := strconv.ParseInt(str, 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i), nil
+}
+
+// ConvertInt64 turns a string into an int64
+func ConvertInt64(str string) (int64, error) {
+ return strconv.ParseInt(str, 10, 64)
+}
+
+// ConvertUint8 turns a string into a uint8
+func ConvertUint8(str string) (uint8, error) {
+ i, err := strconv.ParseUint(str, 10, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(i), nil
+}
+
+// ConvertUint16 turns a string into a uint16
+func ConvertUint16(str string) (uint16, error) {
+ i, err := strconv.ParseUint(str, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(i), nil
+}
+
+// ConvertUint32 turns a string into a uint32
+func ConvertUint32(str string) (uint32, error) {
+ i, err := strconv.ParseUint(str, 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(i), nil
+}
+
+// ConvertUint64 turns a string into a uint64
+func ConvertUint64(str string) (uint64, error) {
+ return strconv.ParseUint(str, 10, 64)
+}
+
+// FormatBool turns a boolean into a string
+func FormatBool(value bool) string {
+ return strconv.FormatBool(value)
+}
+
+// FormatFloat32 turns a float32 into a string
+func FormatFloat32(value float32) string {
+ return strconv.FormatFloat(float64(value), 'f', -1, 32)
+}
+
+// FormatFloat64 turns a float64 into a string
+func FormatFloat64(value float64) string {
+ return strconv.FormatFloat(value, 'f', -1, 64)
+}
+
+// FormatInt8 turns an int8 into a string
+func FormatInt8(value int8) string {
+ return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatInt16 turns an int16 into a string
+func FormatInt16(value int16) string {
+ return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatInt32 turns an int32 into a string
+func FormatInt32(value int32) string {
+ return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatInt64 turns an int64 into a string
+func FormatInt64(value int64) string {
+ return strconv.FormatInt(value, 10)
+}
+
+// FormatUint8 turns a uint8 into a string
+func FormatUint8(value uint8) string {
+ return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint16 turns a uint16 into a string
+func FormatUint16(value uint16) string {
+ return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint32 turns a uint32 into a string
+func FormatUint32(value uint32) string {
+ return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint64 turns a uint64 into a string
+func FormatUint64(value uint64) string {
+ return strconv.FormatUint(value, 10)
+}
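To make the intent of these converters concrete, a small sketch (assuming the vendored import path above):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// String -> typed value; strconv does the range checking.
	i, err := swag.ConvertInt16("12345")
	if err != nil {
		panic(err)
	}

	// Typed value -> string.
	fmt.Println(swag.FormatInt16(i)) // 12345

	// 2^53 falls outside the JSON-safe integer range, 2^53-1 does not.
	fmt.Println(swag.IsFloat64AJSONInteger(float64(1 << 53)))    // false
	fmt.Println(swag.IsFloat64AJSONInteger(float64(1<<53 - 1))) // true
}
```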
diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go
new file mode 100644
index 000000000..c95e4e78b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/convert_types.go
@@ -0,0 +1,595 @@
+package swag
+
+import "time"
+
+// This file was taken from the AWS Go SDK
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int32 returns a pointer to the int32 value passed in.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int32Value returns the value of the int32 pointer passed in or
+// 0 if the pointer is nil.
+func Int32Value(v *int32) int32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int32Slice converts a slice of int32 values into a slice of
+// int32 pointers
+func Int32Slice(src []int32) []*int32 {
+ dst := make([]*int32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int32ValueSlice converts a slice of int32 pointers into a slice of
+// int32 values
+func Int32ValueSlice(src []*int32) []int32 {
+ dst := make([]int32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int32Map converts a string map of int32 values into a string
+// map of int32 pointers
+func Int32Map(src map[string]int32) map[string]*int32 {
+ dst := make(map[string]*int32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int32ValueMap converts a string map of int32 pointers into a string
+// map of int32 values
+func Int32ValueMap(src map[string]*int32) map[string]int32 {
+ dst := make(map[string]int32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+ return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+ dst := make([]*uint, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+ dst := make([]uint, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+ dst := make(map[string]*uint)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+ dst := make(map[string]uint)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint32 returns a pointer to the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint32Value returns the value of the uint32 pointer passed in or
+// 0 if the pointer is nil.
+func Uint32Value(v *uint32) uint32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint32Slice converts a slice of uint32 values into a slice of
+// uint32 pointers
+func Uint32Slice(src []uint32) []*uint32 {
+ dst := make([]*uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
+// uint32 values
+func Uint32ValueSlice(src []*uint32) []uint32 {
+ dst := make([]uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint32Map converts a string map of uint32 values into a string
+// map of uint32 pointers
+func Uint32Map(src map[string]uint32) map[string]*uint32 {
+ dst := make(map[string]*uint32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint32ValueMap converts a string map of uint32 pointers into a string
+// map of uint32 values
+func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
+ dst := make(map[string]uint32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint64 returns a pointer to the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// Uint64Value returns the value of the uint64 pointer passed in or
+// 0 if the pointer is nil.
+func Uint64Value(v *uint64) uint64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint64Slice converts a slice of uint64 values into a slice of
+// uint64 pointers
+func Uint64Slice(src []uint64) []*uint64 {
+ dst := make([]*uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
+// uint64 values
+func Uint64ValueSlice(src []*uint64) []uint64 {
+ dst := make([]uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint64Map converts a string map of uint64 values into a string
+// map of uint64 pointers
+func Uint64Map(src map[string]uint64) map[string]*uint64 {
+ dst := make(map[string]*uint64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint64ValueMap converts a string map of uint64 pointers into a string
+// map of uint64 values
+func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
+ dst := make(map[string]uint64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
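The pointer helpers above exist because optional fields in generated swagger models are pointers; a brief sketch of the round trip (same assumed import path):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Optional fields are modelled as pointers; the Value helpers
	// dereference safely and fall back to the zero value on nil.
	enabled := swag.Bool(true)
	fmt.Println(swag.BoolValue(enabled)) // true
	fmt.Println(swag.BoolValue(nil))     // false

	// Slice and map variants convert element-wise.
	ptrs := swag.StringSlice([]string{"a", "b"})
	fmt.Println(swag.StringValueSlice(ptrs)) // [a b]
}
```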
diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go
new file mode 100644
index 000000000..6e9ec20fc
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/json.go
@@ -0,0 +1,270 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "bytes"
+ "encoding/json"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// DefaultJSONNameProvider is the default cache for types
+var DefaultJSONNameProvider = NewNameProvider()
+
+const comma = byte(',')
+
+var closers = map[byte]byte{
+ '{': '}',
+ '[': ']',
+}
+
+type ejMarshaler interface {
+ MarshalEasyJSON(w *jwriter.Writer)
+}
+
+type ejUnmarshaler interface {
+ UnmarshalEasyJSON(w *jlexer.Lexer)
+}
+
+// WriteJSON writes json data, preferring an appropriate interface that short-circuits the marshaller
+// so it takes the fastest option available.
+func WriteJSON(data interface{}) ([]byte, error) {
+ if d, ok := data.(ejMarshaler); ok {
+ jw := new(jwriter.Writer)
+ d.MarshalEasyJSON(jw)
+ return jw.BuildBytes()
+ }
+ if d, ok := data.(json.Marshaler); ok {
+ return d.MarshalJSON()
+ }
+ return json.Marshal(data)
+}
+
+// ReadJSON reads json data, preferring an appropriate interface that short-circuits the unmarshaller
+// so it takes the fastest option available.
+func ReadJSON(data []byte, value interface{}) error {
+ if d, ok := value.(ejUnmarshaler); ok {
+ jl := &jlexer.Lexer{Data: data}
+ d.UnmarshalEasyJSON(jl)
+ return jl.Error()
+ }
+ if d, ok := value.(json.Unmarshaler); ok {
+ return d.UnmarshalJSON(data)
+ }
+ return json.Unmarshal(data, value)
+}
+
+// DynamicJSONToStruct converts an untyped json structure into a struct
+func DynamicJSONToStruct(data interface{}, target interface{}) error {
+ // TODO: convert straight to a json typed map (mergo + iterate?)
+ b, err := WriteJSON(data)
+ if err != nil {
+ return err
+ }
+ if err := ReadJSON(b, target); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ConcatJSON concatenates multiple json objects efficiently
+func ConcatJSON(blobs ...[]byte) []byte {
+ if len(blobs) == 0 {
+ return nil
+ }
+ if len(blobs) == 1 {
+ return blobs[0]
+ }
+
+ last := len(blobs) - 1
+ var opening, closing byte
+ a := 0
+ idx := 0
+ buf := bytes.NewBuffer(nil)
+
+ for i, b := range blobs {
+ if len(b) > 0 && opening == 0 { // is this an array or an object?
+ opening, closing = b[0], closers[b[0]]
+ }
+
+ if opening != '{' && opening != '[' {
+ continue // don't know how to concatenate non container objects
+ }
+
+ if len(b) < 3 { // yep empty but also the last one, so closing this thing
+ if i == last && a > 0 {
+ buf.WriteByte(closing)
+ }
+ continue
+ }
+
+ idx = 0
+ if a > 0 { // we need to join with a comma for everything beyond the first non-empty item
+ buf.WriteByte(comma)
+ idx = 1 // this is not the first or the last so we want to drop the leading bracket
+ }
+
+ if i != last { // not the last one, strip brackets
+ buf.Write(b[idx : len(b)-1])
+ } else { // last one, strip only the leading bracket
+ buf.Write(b[idx:])
+ }
+ a++
+ }
+ // somehow it ended up being empty, so provide a default value
+ if buf.Len() == 0 {
+ buf.WriteByte(opening)
+ buf.WriteByte(closing)
+ }
+ return buf.Bytes()
+}
+
+// ToDynamicJSON turns an object into a properly JSON typed structure
+func ToDynamicJSON(data interface{}) interface{} {
+ // TODO: convert straight to a json typed map (mergo + iterate?)
+ b, _ := json.Marshal(data)
+ var res interface{}
+ json.Unmarshal(b, &res)
+ return res
+}
+
+// FromDynamicJSON turns an object into a properly JSON typed structure and decodes it into target
+func FromDynamicJSON(data, target interface{}) error {
+ b, _ := json.Marshal(data)
+ return json.Unmarshal(b, target)
+}
+
+// NameProvider represents an object capable of translating from go property names
+// to json property names
+// This type is thread-safe.
+type NameProvider struct {
+ lock *sync.Mutex
+ index map[reflect.Type]nameIndex
+}
+
+type nameIndex struct {
+ jsonNames map[string]string
+ goNames map[string]string
+}
+
+// NewNameProvider creates a new name provider
+func NewNameProvider() *NameProvider {
+ return &NameProvider{
+ lock: &sync.Mutex{},
+ index: make(map[reflect.Type]nameIndex),
+ }
+}
+
+func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) {
+ for i := 0; i < tpe.NumField(); i++ {
+ targetDes := tpe.Field(i)
+
+ if targetDes.PkgPath != "" { // unexported
+ continue
+ }
+
+ if targetDes.Anonymous { // walk embedded structures tree down first
+ buildnameIndex(targetDes.Type, idx, reverseIdx)
+ continue
+ }
+
+ if tag := targetDes.Tag.Get("json"); tag != "" {
+
+ parts := strings.Split(tag, ",")
+ if len(parts) == 0 {
+ continue
+ }
+
+ nm := parts[0]
+ if nm == "-" {
+ continue
+ }
+ if nm == "" { // empty string means we want to use the Go name
+ nm = targetDes.Name
+ }
+
+ idx[nm] = targetDes.Name
+ reverseIdx[targetDes.Name] = nm
+ }
+ }
+}
+
+func newNameIndex(tpe reflect.Type) nameIndex {
+ var idx = make(map[string]string, tpe.NumField())
+ var reverseIdx = make(map[string]string, tpe.NumField())
+
+ buildnameIndex(tpe, idx, reverseIdx)
+ return nameIndex{jsonNames: idx, goNames: reverseIdx}
+}
+
+// GetJSONNames gets all the json property names for a type
+func (n *NameProvider) GetJSONNames(subject interface{}) []string {
+ tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+ names, ok := n.index[tpe]
+ if !ok {
+ names = n.makeNameIndex(tpe)
+ }
+
+ var res []string
+ for k := range names.jsonNames {
+ res = append(res, k)
+ }
+ return res
+}
+
+// GetJSONName gets the json name for a go property name
+func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) {
+ tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+ return n.GetJSONNameForType(tpe, name)
+}
+
+// GetJSONNameForType gets the json name for a go property name on a given type
+func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) {
+ names, ok := n.index[tpe]
+ if !ok {
+ names = n.makeNameIndex(tpe)
+ }
+ nme, ok := names.goNames[name]
+ return nme, ok
+}
+
+func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ names := newNameIndex(tpe)
+ n.index[tpe] = names
+ return names
+}
+
+// GetGoName gets the go name for a json property name
+func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) {
+ tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+ return n.GetGoNameForType(tpe, name)
+}
+
+// GetGoNameForType gets the go name for a given type for a json property name
+func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) {
+ names, ok := n.index[tpe]
+ if !ok {
+ names = n.makeNameIndex(tpe)
+ }
+ nme, ok := names.jsonNames[name]
+ return nme, ok
+}
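Two of the less obvious APIs in this file, sketched under the same assumed import path: ConcatJSON splices objects together without re-parsing them, and NameProvider caches the Go-name/JSON-name mapping per type.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// ConcatJSON strips the closing/opening brackets of adjacent
	// blobs and joins their members with commas.
	merged := swag.ConcatJSON(
		[]byte(`{"name":"swag"}`),
		[]byte(`{"fast":true}`),
	)
	fmt.Println(string(merged)) // {"name":"swag","fast":true}

	// NameProvider translates between Go field names and json tags.
	type pet struct {
		GoName string `json:"jsonName"`
	}
	np := swag.NewNameProvider()
	jsonName, _ := np.GetJSONName(pet{}, "GoName")
	fmt.Println(jsonName) // jsonName
}
```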
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
new file mode 100644
index 000000000..6dbc31330
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -0,0 +1,49 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
+func LoadFromFileOrHTTP(path string) ([]byte, error) {
+ return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path)
+}
+
+// LoadStrategy returns a loader function for a given path or uri
+func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(path, "http") {
+ return remote
+ }
+ return local
+}
+
+func loadHTTPBytes(path string) ([]byte, error) {
+ resp, err := http.Get(path)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
+ }
+
+ return ioutil.ReadAll(resp.Body)
+}
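LoadStrategy is the seam for testing: both loaders can be swapped out. A sketch with a fake loader (the fake is hypothetical, same assumed import path):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// fake stands in for ioutil.ReadFile / the HTTP loader.
	fake := func(pth string) ([]byte, error) {
		return []byte(`{"ok":true}`), nil
	}

	// Paths starting with "http" select the remote loader,
	// everything else the local one.
	loader := swag.LoadStrategy("spec.json", fake, fake)
	b, err := loader("spec.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"ok":true}
}
```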
diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go
new file mode 100644
index 000000000..8323fa37b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/net.go
@@ -0,0 +1,24 @@
+package swag
+
+import (
+ "net"
+ "strconv"
+)
+
+// SplitHostPort splits a network address into a host and a port.
+// The port is -1 when there is no port to be found
+func SplitHostPort(addr string) (host string, port int, err error) {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil {
+ return "", -1, err
+ }
+ if p == "" {
+ return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr}
+ }
+
+ pi, err := strconv.Atoi(p)
+ if err != nil {
+ return "", -1, err
+ }
+ return h, pi, nil
+}
diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go
new file mode 100644
index 000000000..273e9fbed
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/path.go
@@ -0,0 +1,56 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+const (
+ // GOPATHKey represents the env key for gopath
+ GOPATHKey = "GOPATH"
+)
+
+// FindInSearchPath finds a package in a provided list of paths
+func FindInSearchPath(searchPath, pkg string) string {
+ pathsList := filepath.SplitList(searchPath)
+ for _, path := range pathsList {
+ if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil {
+ if _, err := os.Stat(evaluatedPath); err == nil {
+ return evaluatedPath
+ }
+ }
+ }
+ return ""
+}
+
+// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT
+func FindInGoSearchPath(pkg string) string {
+ return FindInSearchPath(FullGoSearchPath(), pkg)
+}
+
+// FullGoSearchPath gets the search paths for finding packages
+func FullGoSearchPath() string {
+ allPaths := os.Getenv(GOPATHKey)
+ if allPaths != "" {
+ allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":")
+ } else {
+ allPaths = runtime.GOROOT()
+ }
+ return allPaths
+}
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
new file mode 100644
index 000000000..d8b54ee61
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -0,0 +1,318 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "math"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698
+var commonInitialisms = map[string]bool{
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "JSON": true,
+ "LHS": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UUID": true,
+ "UID": true,
+ "UI": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XSRF": true,
+ "XSS": true,
+}
+var initialisms []string
+
+func init() {
+ for k := range commonInitialisms {
+ initialisms = append(initialisms, k)
+ }
+ sort.Sort(sort.Reverse(byLength(initialisms)))
+}
+
+// JoinByFormat joins a string array by a known format:
+// ssv: space separated value
+// tsv: tab separated value
+// pipes: pipe (|) separated value
+// csv: comma separated value (default)
+func JoinByFormat(data []string, format string) []string {
+ if len(data) == 0 {
+ return data
+ }
+ var sep string
+ switch format {
+ case "ssv":
+ sep = " "
+ case "tsv":
+ sep = "\t"
+ case "pipes":
+ sep = "|"
+ case "multi":
+ return data
+ default:
+ sep = ","
+ }
+ return []string{strings.Join(data, sep)}
+}
+
+// SplitByFormat splits a string by a known format:
+// ssv: space separated value
+// tsv: tab separated value
+// pipes: pipe (|) separated value
+// csv: comma separated value (default)
+func SplitByFormat(data, format string) []string {
+ if data == "" {
+ return nil
+ }
+ var sep string
+ switch format {
+ case "ssv":
+ sep = " "
+ case "tsv":
+ sep = "\t"
+ case "pipes":
+ sep = "|"
+ case "multi":
+ return nil
+ default:
+ sep = ","
+ }
+ var result []string
+ for _, s := range strings.Split(data, sep) {
+ if ts := strings.TrimSpace(s); ts != "" {
+ result = append(result, ts)
+ }
+ }
+ return result
+}
+
+type byLength []string
+
+func (s byLength) Len() int {
+ return len(s)
+}
+func (s byLength) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byLength) Less(i, j int) bool {
+ return len(s[i]) < len(s[j])
+}
+
+// Prepares strings by splitting by caps, spaces, dashes, and underscores
+func split(str string) (words []string) {
+ repl := strings.NewReplacer(
+ "@", "At ",
+ "&", "And ",
+ "|", "Pipe ",
+ "$", "Dollar ",
+ "!", "Bang ",
+ "-", " ",
+ "_", " ",
+ )
+
+ rex1 := regexp.MustCompile(`(\p{Lu})`)
+ rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`)
+
+ str = trim(str)
+
+ // Convert dash and underscore to spaces
+ str = repl.Replace(str)
+
+ // Split when uppercase is found (needed for Snake)
+ str = rex1.ReplaceAllString(str, " $1")
+ // check if consecutive single char things make up an initialism
+
+ for _, k := range initialisms {
+ str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1)
+ }
+ // Get the final list of words
+ words = rex2.FindAllString(str, -1)
+
+ return
+}
+
+// Removes leading and trailing spaces
+func trim(str string) string {
+ return strings.Trim(str, " ")
+}
+
+// Shortcut to strings.ToUpper() on a trimmed string
+func upper(str string) string {
+ return strings.ToUpper(trim(str))
+}
+
+// Shortcut to strings.ToLower() on a trimmed string
+func lower(str string) string {
+ return strings.ToLower(trim(str))
+}
+
+// ToFileName lowercases and underscores a go type name
+func ToFileName(name string) string {
+ var out []string
+ for _, w := range split(name) {
+ out = append(out, lower(w))
+ }
+ return strings.Join(out, "_")
+}
+
+// ToCommandName lowercases and underscores a go type name
+func ToCommandName(name string) string {
+ var out []string
+ for _, w := range split(name) {
+ out = append(out, lower(w))
+ }
+ return strings.Join(out, "-")
+}
+
+// ToHumanNameLower represents a code name as a human series of words
+func ToHumanNameLower(name string) string {
+ var out []string
+ for _, w := range split(name) {
+ if !commonInitialisms[upper(w)] {
+ out = append(out, lower(w))
+ } else {
+ out = append(out, w)
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
+func ToHumanNameTitle(name string) string {
+ var out []string
+ for _, w := range split(name) {
+ uw := upper(w)
+ if !commonInitialisms[uw] {
+ out = append(out, upper(w[:1])+lower(w[1:]))
+ } else {
+ out = append(out, w)
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+// ToJSONName camelcases a name which can be underscored or pascal cased
+func ToJSONName(name string) string {
+ var out []string
+ for i, w := range split(name) {
+ if i == 0 {
+ out = append(out, lower(w))
+ continue
+ }
+ out = append(out, upper(w[:1])+lower(w[1:]))
+ }
+ return strings.Join(out, "")
+}
+
+// ToVarName camelcases a name which can be underscored or pascal cased
+func ToVarName(name string) string {
+ res := ToGoName(name)
+ if len(res) <= 1 {
+ return lower(res)
+ }
+ return lower(res[:1]) + res[1:]
+}
+
+// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
+func ToGoName(name string) string {
+ var out []string
+ for _, w := range split(name) {
+ uw := upper(w)
+ mod := int(math.Min(float64(len(uw)), 2))
+ if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] {
+ uw = upper(w[:1]) + lower(w[1:])
+ }
+ out = append(out, uw)
+ }
+ return strings.Join(out, "")
+}
+
+// ContainsStringsCI searches a slice of strings for a case-insensitive match
+func ContainsStringsCI(coll []string, item string) bool {
+ for _, a := range coll {
+ if strings.EqualFold(a, item) {
+ return true
+ }
+ }
+ return false
+}
+
+type zeroable interface {
+ IsZero() bool
+}
+
+// IsZero returns true when the value passed into the function is a zero value.
+// This allows for safer checking of interface values.
+func IsZero(data interface{}) bool {
+ // check for things that have an IsZero method instead
+ if vv, ok := data.(zeroable); ok {
+ return vv.IsZero()
+ }
+ // continue with slightly more complex reflection
+ v := reflect.ValueOf(data)
+ switch v.Kind() {
+ case reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ case reflect.Struct, reflect.Array:
+ return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
+ case reflect.Invalid:
+ return true
+ }
+ return false
+}
+
+// CommandLineOptionsGroup represents a group of user-defined command line options
+type CommandLineOptionsGroup struct {
+ ShortDescription string
+ LongDescription string
+ Options interface{}
+}
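The casing helpers share the splitter and the initialism table above; a sketch of the different output shapes (same assumed import path):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("sample_http_id"))   // SampleHTTPID
	fmt.Println(swag.ToVarName("sample_http_id"))  // sampleHTTPID
	fmt.Println(swag.ToFileName("SampleHTTPText")) // sample_http_text
	fmt.Println(swag.ToJSONName("sample_text"))    // sampleText

	// Separator helpers used for swagger's collectionFormat.
	fmt.Println(swag.SplitByFormat("a|b|c", "pipes")) // [a b c]
}
```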
diff --git a/vendor/github.com/go-zoo/bone/LICENSE b/vendor/github.com/go-zoo/bone/LICENSE
new file mode 100644
index 000000000..652583b76
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 CodingFerret
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, Subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or Substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/go-zoo/bone/README.md b/vendor/github.com/go-zoo/bone/README.md
new file mode 100644
index 000000000..29ea5f6de
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/README.md
@@ -0,0 +1,81 @@
+bone [![GoDoc](https://godoc.org/github.com/squiidz/bone?status.png)](http://godoc.org/github.com/go-zoo/bone) [![Build Status](https://travis-ci.org/go-zoo/bone.svg)](https://travis-ci.org/go-zoo/bone) [![Go Report Card](https://goreportcard.com/badge/go-zoo/bone)](https://goreportcard.com/report/go-zoo/bone) [![Sourcegraph](https://sourcegraph.com/github.com/go-zoo/bone/-/badge.svg)](https://sourcegraph.com/github.com/go-zoo/bone?badge)
+=======
+
+## What is bone?
+
+Bone is a lightweight and lightning fast HTTP multiplexer for Go. It supports:
+
+- URL Parameters
+- REGEX Parameters
+- Wildcard routes
+- Router Prefix
+- Sub Router, `mux.SubRoute()`, supporting most standard routers (bone, gorilla/mux, httprouter, etc.)
+- HTTP method declaration
+- Support for `http.Handler` and `http.HandlerFunc`
+- Custom NotFound handler
+- Respects the Go standard `http.Handler` interface
+
+![alt tag](https://c2.staticflickr.com/2/1070/540747396_5542b42cca_z.jpg)
+
+## Speed
+
+```
+- BenchmarkBoneMux 10000000 118 ns/op
+- BenchmarkZeusMux 100000 144 ns/op
+- BenchmarkHttpRouterMux 10000000 134 ns/op
+- BenchmarkNetHttpMux 3000000 580 ns/op
+- BenchmarkGorillaMux 300000 3333 ns/op
+- BenchmarkGorillaPatMux 1000000 1889 ns/op
+```
+
+ These tests are just for fun; all of these routers are great and really efficient.
+ Bone does not pretend to be the fastest router for every job.
+
+## Example
+
+``` go
+
+package main
+
+import (
+ "net/http"
+
+ "github.com/go-zoo/bone"
+)
+
+func main() {
+ mux := bone.New()
+
+ // mux.Get, Post, etc ... takes http.Handler
+ mux.Get("/home/:id", http.HandlerFunc(HomeHandler))
+ mux.Get("/profil/:id/:var", http.HandlerFunc(ProfilHandler))
+ mux.Post("/data", http.HandlerFunc(DataHandler))
+
+ // Support REGEX Route params
+ mux.Get("/index/#id^[0-9]$", http.HandlerFunc(IndexHandler))
+
+ // Handle take http.Handler
+ mux.Handle("/", http.HandlerFunc(RootHandler))
+
+ // GetFunc, PostFunc etc ... takes http.HandlerFunc
+ mux.GetFunc("/test", Handler)
+
+ http.ListenAndServe(":8080", mux)
+}
+
+func Handler(rw http.ResponseWriter, req *http.Request) {
+ // Get the value of the "id" parameter.
+ val := bone.GetValue(req, "id")
+
+ rw.Write([]byte(val))
+}
+
+```
+
+## Blog Posts
+- http://www.peterbe.com/plog/my-favorite-go-multiplexer
+- https://harshladha.xyz/my-first-library-in-go-language-hasty-791b8e2b9e69
+
+## Libs
+- Errors dump for Go : [Trash](https://github.com/go-zoo/trash)
+- Middleware Chaining module : [Claw](https://github.com/go-zoo/claw)
diff --git a/vendor/github.com/go-zoo/bone/bone.go b/vendor/github.com/go-zoo/bone/bone.go
new file mode 100644
index 000000000..d00a0b083
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/bone.go
@@ -0,0 +1,74 @@
+/********************************
+*** Multiplexer for Go ***
+*** Bone is under MIT license ***
+*** Code by CodingFerret ***
+*** github.com/go-zoo ***
+*********************************/
+
+package bone
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Mux has routes and a notFound handler
+// Routes: all the registered routes
+// notFound: 404 handler, defaults to http.NotFound if not provided
+type Mux struct {
+ Routes map[string][]*Route
+ prefix string
+ notFound http.Handler
+ Serve func(rw http.ResponseWriter, req *http.Request)
+ CaseSensitive bool
+}
+
+var (
+ static = "static"
+ method = []string{"GET", "POST", "PUT", "DELETE", "HEAD", "PATCH", "OPTIONS"}
+)
+
+type adapter func(*Mux) *Mux
+
+// New creates a pointer to a Mux instance
+func New(adapters ...adapter) *Mux {
+ m := &Mux{Routes: make(map[string][]*Route), Serve: nil, CaseSensitive: true}
+ for _, adap := range adapters {
+ adap(m)
+ }
+ if m.Serve == nil {
+ m.Serve = m.DefaultServe
+ }
+ return m
+}
+
+// Prefix sets a default prefix for all routes registered on the router
+func (m *Mux) Prefix(p string) *Mux {
+ m.prefix = strings.TrimSuffix(p, "/")
+ return m
+}
+
+// DefaultServe is the default http request handler
+func (m *Mux) DefaultServe(rw http.ResponseWriter, req *http.Request) {
+ // Check if a route match
+ if !m.parse(rw, req) {
+ // Check if it's a static resource
+ if !m.staticRoute(rw, req) {
+ // Check if the request path doesn't end with /
+ if !m.validate(rw, req) {
+ // Check if same route exists for another HTTP method
+ if !m.otherMethods(rw, req) {
+ m.HandleNotFound(rw, req)
+ }
+ }
+ }
+ }
+}
+
+// ServeHTTP passes the request to the Serve method of Mux
+func (m *Mux) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if !m.CaseSensitive {
+ req.URL.Path = strings.ToLower(req.URL.Path)
+ }
+ m.Serve(rw, req)
+}
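New's variadic adapter parameter is how callers tweak the Mux before the default Serve is wired in; one plausible sketch (the adapter function here is illustrative, not part of the package):

```go
package main

import (
	"net/http"

	"github.com/go-zoo/bone"
)

// caseInsensitive is an illustrative adapter: New applies it to the
// Mux before falling back to DefaultServe.
func caseInsensitive(m *bone.Mux) *bone.Mux {
	m.CaseSensitive = false
	return m
}

func main() {
	mux := bone.New(caseInsensitive)
	mux.GetFunc("/ping", func(rw http.ResponseWriter, _ *http.Request) {
		rw.Write([]byte("pong"))
	})
	http.ListenAndServe(":8080", mux)
}
```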
diff --git a/vendor/github.com/go-zoo/bone/helper.go b/vendor/github.com/go-zoo/bone/helper.go
new file mode 100644
index 000000000..bd7b45e95
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/helper.go
@@ -0,0 +1,169 @@
+/********************************
+*** Multiplexer for Go ***
+*** Bone is under MIT license ***
+*** Code by CodingFerret ***
+*** github.com/go-zoo ***
+*********************************/
+
+package bone
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+)
+
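+// ListenAndServe starts an HTTP server on the given address, using the Mux as its handler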
+func (m *Mux) ListenAndServe(port string) error {
+ return http.ListenAndServe(port, m)
+}
+
+func (m *Mux) parse(rw http.ResponseWriter, req *http.Request) bool {
+ for _, r := range m.Routes[req.Method] {
+ ok := r.parse(rw, req)
+ if ok {
+ return true
+ }
+ }
+ // If no HEAD method, default to GET
+ if req.Method == "HEAD" {
+ for _, r := range m.Routes["GET"] {
+ ok := r.parse(rw, req)
+ if ok {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// staticRoute checks if the request path matches a static route
+func (m *Mux) staticRoute(rw http.ResponseWriter, req *http.Request) bool {
+ for _, s := range m.Routes[static] {
+ if len(req.URL.Path) >= s.Size {
+ if req.URL.Path[:s.Size] == s.Path {
+ s.Handler.ServeHTTP(rw, req)
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// HandleNotFound handles requests that do not match a registered handler.
+func (m *Mux) HandleNotFound(rw http.ResponseWriter, req *http.Request) {
+ if m.notFound != nil {
+ m.notFound.ServeHTTP(rw, req)
+ } else {
+ http.NotFound(rw, req)
+ }
+}
+
+// validate redirects when the path ends with a trailing /, otherwise it retries route matching
+func (m *Mux) validate(rw http.ResponseWriter, req *http.Request) bool {
+ plen := len(req.URL.Path)
+ if plen > 1 && req.URL.Path[plen-1:] == "/" {
+ cleanURL(&req.URL.Path)
+ rw.Header().Set("Location", req.URL.String())
+ rw.WriteHeader(http.StatusFound)
+ return true
+ }
+ // Retry to find a route that match
+ return m.parse(rw, req)
+}
+
+func valid(path string) bool {
+ plen := len(path)
+ if plen > 1 && path[plen-1:] == "/" {
+ return false
+ }
+ return true
+}
+
+// cleanURL strips trailing slashes from the URL path
+func cleanURL(url *string) {
+ ulen := len((*url))
+ if ulen > 1 {
+ if (*url)[ulen-1:] == "/" {
+ *url = (*url)[:ulen-1]
+ cleanURL(url)
+ }
+ }
+}
+
+// GetValue returns the value for the given key on the current *http.Request
+func GetValue(req *http.Request, key string) string {
+ return GetAllValues(req)[key]
+}
+
+// GetRequestRoute returns the route of the given request
+func (m *Mux) GetRequestRoute(req *http.Request) string {
+ cleanURL(&req.URL.Path)
+ for _, r := range m.Routes[req.Method] {
+ if r.Atts != 0 {
+ if r.Atts&SUB != 0 {
+ return r.Handler.(*Mux).GetRequestRoute(req)
+ }
+ if r.Match(req) {
+ return r.Path
+ }
+ }
+ if req.URL.Path == r.Path {
+ return r.Path
+ }
+ }
+
+ for _, s := range m.Routes[static] {
+ if len(req.URL.Path) >= s.Size {
+ if req.URL.Path[:s.Size] == s.Path {
+ return s.Path
+ }
+ }
+ }
+
+ return "NotFound"
+}
+
+// GetQuery returns the values for the given key in the current *http.Request query
+func GetQuery(req *http.Request, key string) []string {
+ if ok, value := extractQueries(req); ok {
+ return value[key]
+ }
+ return nil
+}
+
+// GetAllQueries returns all queries of the current *http.Request
+func GetAllQueries(req *http.Request) map[string][]string {
+ if ok, values := extractQueries(req); ok {
+ return values
+ }
+ return nil
+}
+
+func extractQueries(req *http.Request) (bool, map[string][]string) {
+ if q, err := url.ParseQuery(req.URL.RawQuery); err == nil {
+ var queries = make(map[string][]string)
+ for k, v := range q {
+ for _, item := range v {
+ values := strings.Split(item, ",")
+ queries[k] = append(queries[k], values...)
+ }
+ }
+ return true, queries
+ }
+ return false, nil
+}
+
+func (m *Mux) otherMethods(rw http.ResponseWriter, req *http.Request) bool {
+ for _, met := range method {
+ if met != req.Method {
+ for _, r := range m.Routes[met] {
+ ok := r.exists(rw, req)
+ if ok {
+ rw.WriteHeader(http.StatusMethodNotAllowed)
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/go-zoo/bone/helper_15.go b/vendor/github.com/go-zoo/bone/helper_15.go
new file mode 100644
index 000000000..068ce18e0
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/helper_15.go
@@ -0,0 +1,45 @@
+// +build !go1.7
+
+/********************************
+*** Multiplexer for Go ***
+*** Bone is under MIT license ***
+*** Code by CodingFerret ***
+*** github.com/go-zoo ***
+*********************************/
+
+package bone
+
+import (
+ "net/http"
+ "sync"
+)
+
+var globalVars = struct {
+ sync.RWMutex
+ v map[*http.Request]map[string]string
+}{v: make(map[*http.Request]map[string]string)}
+
+// GetAllValues returns the route params of the request
+func GetAllValues(req *http.Request) map[string]string {
+ globalVars.RLock()
+ values := globalVars.v[req]
+ globalVars.RUnlock()
+ return values
+}
+
+// serveMatchedRequest is an extension point for Route which allows us to conditionally compile for
+// go1.7 and <go1.7
+func (r *Route) serveMatchedRequest(rw http.ResponseWriter, req *http.Request, vars map[string]string) {
+ globalVars.Lock()
+ globalVars.v[req] = vars
+ globalVars.Unlock()
+
+ // Regardless of whether ServeHTTP panics (and potentially recovers), we make sure not to leak
+ // memory in globalVars for this request
+ defer func() {
+ globalVars.Lock()
+ delete(globalVars.v, req)
+ globalVars.Unlock()
+ }()
+ r.Handler.ServeHTTP(rw, req)
+}
diff --git a/vendor/github.com/go-zoo/bone/helper_17.go b/vendor/github.com/go-zoo/bone/helper_17.go
new file mode 100644
index 000000000..2e41ea4be
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/helper_17.go
@@ -0,0 +1,39 @@
+// +build go1.7
+
+/********************************
+*** Multiplexer for Go ***
+*** Bone is under MIT license ***
+*** Code by CodingFerret ***
+*** github.com/go-zoo ***
+*********************************/
+
+package bone
+
+import (
+ "context"
+ "net/http"
+)
+
+// contextKeyType is a private struct that is used for storing bone values in the request's context.Context
+type contextKeyType struct{}
+
+// contextKey is the key that is used to store bone values in the context.Context of each request
+var contextKey = contextKeyType{}
+
+// GetAllValues returns the route params of the request
+func GetAllValues(req *http.Request) map[string]string {
+ values, ok := req.Context().Value(contextKey).(map[string]string)
+ if ok {
+ return values
+ }
+
+ return map[string]string{}
+}
+
+// serveMatchedRequest is an extension point for Route which allows us to conditionally compile for
+// go1.7 and <go1.7
+func (r *Route) serveMatchedRequest(rw http.ResponseWriter, req *http.Request, vars map[string]string) {
+ ctx := context.WithValue(req.Context(), contextKey, vars)
+ newReq := req.WithContext(ctx)
+ r.Handler.ServeHTTP(rw, newReq)
+}
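The two helper files above give GetValue a uniform behaviour across Go versions: params come from the request context on go1.7+ and from a global request-keyed map before that. A sketch of the shared API:

```go
package main

import (
	"net/http"

	"github.com/go-zoo/bone"
)

func main() {
	mux := bone.New()
	mux.GetFunc("/users/:id", func(rw http.ResponseWriter, req *http.Request) {
		// GetValue resolves :id regardless of which helper
		// implementation was compiled in.
		rw.Write([]byte(bone.GetValue(req, "id")))
	})
	http.ListenAndServe(":8080", mux)
}
```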
diff --git a/vendor/github.com/go-zoo/bone/mux.go b/vendor/github.com/go-zoo/bone/mux.go
new file mode 100644
index 000000000..0ec29a338
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/mux.go
@@ -0,0 +1,137 @@
+/********************************
+*** Multiplexer for Go ***
+*** Bone is under MIT license ***
+*** Code by CodingFerret ***
+*** github.com/go-zoo ***
+*********************************/
+
+package bone
+
+import "net/http"
+
+// Router is the same as an http.Handler
+type Router interface {
+ ServeHTTP(http.ResponseWriter, *http.Request)
+}
+
+// Register registers the route in the router
+func (m *Mux) Register(method string, path string, handler http.Handler) *Route {
+ return m.register(method, path, handler)
+}
+
+// GetFunc adds a new route to the Mux with the Get method
+func (m *Mux) GetFunc(path string, handler http.HandlerFunc) *Route {
+ return m.register("GET", path, handler)
+}
+
+// PostFunc adds a new route to the Mux with the Post method
+func (m *Mux) PostFunc(path string, handler http.HandlerFunc) *Route {
+ return m.register("POST", path, handler)
+}
+
+// PutFunc adds a new route to the Mux with the Put method
+func (m *Mux) PutFunc(path string, handler http.HandlerFunc) *Route {
+ return m.register("PUT", path, handler)
+}
+
+// DeleteFunc adds a new route to the Mux with the DELETE method
+func (m *Mux) DeleteFunc(path string, handler http.HandlerFunc) *Route {
+ return m.register("DELETE", path, handler)
+}
+
+// HeadFunc adds a new route to the Mux with the HEAD method
+func (m *Mux) HeadFunc(path string, handler http.HandlerFunc) *Route {
+ return m.register("HEAD", path, handler)
+}
+
+// PatchFunc adds a new route to the Mux with the PATCH method
+func (m *Mux) PatchFunc(path string, handler http.HandlerFunc) *Route {
+ return m.register("PATCH", path, handler)
+}
+
+// OptionsFunc adds a new route to the Mux with the OPTIONS method
+func (m *Mux) OptionsFunc(path string, handler http.HandlerFunc) *Route {
+ return m.register("OPTIONS", path, handler)
+}
+
+// NotFoundFunc sets the mux's custom 404 handler
+func (m *Mux) NotFoundFunc(handler http.HandlerFunc) {
+ m.notFound = handler
+}
+
+// Handle adds a new route to the Mux without a specific HTTP method (it is registered for every method)
+func (m *Mux) Handle(path string, handler http.Handler) {
+ for _, mt := range method {
+ m.register(mt, path, handler)
+ }
+}
+
+// HandleFunc is used to pass a func(http.ResponseWriter, *http.Request) instead of an http.Handler
+func (m *Mux) HandleFunc(path string, handler http.HandlerFunc) {
+ m.Handle(path, handler)
+}
+
+// Get adds a new route to the Mux with the GET method
+func (m *Mux) Get(path string, handler http.Handler) *Route {
+ return m.register("GET", path, handler)
+}
+
+// Post adds a new route to the Mux with the POST method
+func (m *Mux) Post(path string, handler http.Handler) *Route {
+ return m.register("POST", path, handler)
+}
+
+// Put adds a new route to the Mux with the PUT method
+func (m *Mux) Put(path string, handler http.Handler) *Route {
+ return m.register("PUT", path, handler)
+}
+
+// Delete adds a new route to the Mux with the DELETE method
+func (m *Mux) Delete(path string, handler http.Handler) *Route {
+ return m.register("DELETE", path, handler)
+}
+
+// Head adds a new route to the Mux with the HEAD method
+func (m *Mux) Head(path string, handler http.Handler) *Route {
+ return m.register("HEAD", path, handler)
+}
+
+// Patch adds a new route to the Mux with the PATCH method
+func (m *Mux) Patch(path string, handler http.Handler) *Route {
+ return m.register("PATCH", path, handler)
+}
+
+// Options adds a new route to the Mux with the OPTIONS method
+func (m *Mux) Options(path string, handler http.Handler) *Route {
+ return m.register("OPTIONS", path, handler)
+}
+
+// NotFound sets the mux's custom 404 handler
+func (m *Mux) NotFound(handler http.Handler) {
+ m.notFound = handler
+}
+
+// register registers the new route in the router with the provided method and handler
+func (m *Mux) register(method string, path string, handler http.Handler) *Route {
+ r := NewRoute(m.prefix+path, handler)
+ r.Method = method
+ if valid(path) {
+ m.Routes[method] = append(m.Routes[method], r)
+ return r
+ }
+ m.Routes[static] = append(m.Routes[static], r)
+ return r
+}
+
+// SubRoute registers a router as a SubRouter of bone
+func (m *Mux) SubRoute(path string, router Router) *Route {
+ r := NewRoute(m.prefix+path, router)
+ if valid(path) {
+ r.Atts += SUB
+ for _, mt := range method {
+ m.Routes[mt] = append(m.Routes[mt], r)
+ }
+ return r
+ }
+ return nil
+}
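SubRoute registers the sub-router under every HTTP method and tags it with SUB, so route matching can strip the prefix before delegating. A hedged sketch of mounting one mux under another; the New constructor and the handler body are assumptions for illustration:

```
package main

import (
	"net/http"

	"github.com/go-zoo/bone"
)

func main() {
	api := bone.New() // assumed constructor
	api.GetFunc("/ping", func(rw http.ResponseWriter, req *http.Request) {
		rw.Write([]byte("pong"))
	})

	root := bone.New()
	root.SubRoute("/api", api) // GET /api/ping now reaches the handler above
	http.ListenAndServe(":8080", root)
}
```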
diff --git a/vendor/github.com/go-zoo/bone/route.go b/vendor/github.com/go-zoo/bone/route.go
new file mode 100644
index 000000000..7f6c1b1d0
--- /dev/null
+++ b/vendor/github.com/go-zoo/bone/route.go
@@ -0,0 +1,245 @@
+/********************************
+*** Multiplexer for Go ***
+*** Bone is under MIT license ***
+*** Code by CodingFerret ***
+*** github.com/go-zoo ***
+*********************************/
+
+package bone
+
+import (
+ "net/http"
+ "regexp"
+ "strings"
+)
+
+const (
+ //PARAM is stored in Atts if the route has parameters
+ PARAM = 2
+ //SUB is stored in Atts if the route is a sub router
+ SUB = 4
+ //WC is stored in Atts if the route has a wildcard
+ WC = 8
+ //REGEX is stored in Atts if the route contains a regex
+ REGEX = 16
+)
+
+// Route contains the required information for a valid route
+// Path: is the Route URL
+// Size: is the length of the path
+// Token: is the value of each part of the path, split by /
+// Pattern: contains information about the route variables, if the route has any
+// Handler: is the handler that handles this route
+// Method: defines the HTTP method of the route
+type Route struct {
+ Path string
+ Method string
+ Size int
+ Atts int
+ wildPos int
+ Token Token
+ Pattern map[int]string
+ Compile map[int]*regexp.Regexp
+ Tag map[int]string
+ Handler http.Handler
+}
+
+// Token contains all values of a split route path
+// Tokens: string value of each token
+// Size: number of tokens
+type Token struct {
+ raw []int
+ Tokens []string
+ Size int
+}
+
+// NewRoute returns a pointer to a Route instance and calls save() on it
+func NewRoute(url string, h http.Handler) *Route {
+ r := &Route{Path: url, Handler: h}
+ r.save()
+ return r
+}
+
+// save automatically sets the Route.Size and Route.Pattern values
+func (r *Route) save() {
+ r.Size = len(r.Path)
+ r.Token.Tokens = strings.Split(r.Path, "/")
+ for i, s := range r.Token.Tokens {
+ if len(s) >= 1 {
+ switch s[:1] {
+ case ":":
+ if r.Pattern == nil {
+ r.Pattern = make(map[int]string)
+ }
+ r.Pattern[i] = s[1:]
+ r.Atts |= PARAM
+ case "#":
+ if r.Compile == nil {
+ r.Compile = make(map[int]*regexp.Regexp)
+ r.Tag = make(map[int]string)
+ }
+ tmp := strings.Split(s, "^")
+ r.Tag[i] = tmp[0][1:]
+ r.Compile[i] = regexp.MustCompile("^" + tmp[1][:len(tmp[1])-1])
+ r.Atts |= REGEX
+ case "*":
+ r.wildPos = i
+ r.Atts |= WC
+ default:
+ r.Token.raw = append(r.Token.raw, i)
+ }
+ }
+ r.Token.Size++
+ }
+}
+
+// Match checks if the request matches the route Pattern
+func (r *Route) Match(req *http.Request) bool {
+ ok, _ := r.matchAndParse(req)
+ return ok
+}
+
+// matchAndParse checks if the request matches the route Pattern and returns a map of the parsed
+// variables if it matches
+func (r *Route) matchAndParse(req *http.Request) (bool, map[string]string) {
+ ss := strings.Split(req.URL.EscapedPath(), "/")
+ if r.matchRawTokens(&ss) {
+ if len(ss) == r.Token.Size || r.Atts&WC != 0 {
+ totalSize := len(r.Pattern)
+ if r.Atts&REGEX != 0 {
+ totalSize += len(r.Compile)
+ }
+
+ vars := make(map[string]string, totalSize)
+ for k, v := range r.Pattern {
+ vars[v] = ss[k]
+ }
+
+ if r.Atts&REGEX != 0 {
+ for k, v := range r.Compile {
+ if !v.MatchString(ss[k]) {
+ return false, nil
+ }
+ vars[r.Tag[k]] = ss[k]
+ }
+ }
+
+ return true, vars
+ }
+ }
+
+ return false, nil
+}
+
+func (r *Route) parse(rw http.ResponseWriter, req *http.Request) bool {
+ if r.Atts != 0 {
+ if r.Atts&SUB != 0 {
+ if len(req.URL.Path) >= r.Size {
+ if req.URL.Path[:r.Size] == r.Path {
+ req.URL.Path = req.URL.Path[r.Size:]
+ r.Handler.ServeHTTP(rw, req)
+ return true
+ }
+ }
+ }
+
+ if ok, vars := r.matchAndParse(req); ok {
+ r.serveMatchedRequest(rw, req, vars)
+ return true
+ }
+ }
+ if req.URL.Path == r.Path {
+ r.Handler.ServeHTTP(rw, req)
+ return true
+ }
+ return false
+}
+
+func (r *Route) matchRawTokens(ss *[]string) bool {
+ if len(*ss) >= r.Token.Size {
+ for i, v := range r.Token.raw {
+ if (*ss)[v] != r.Token.Tokens[v] {
+ if r.Atts&WC != 0 && r.wildPos == i {
+ return true
+ }
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (r *Route) exists(rw http.ResponseWriter, req *http.Request) bool {
+ if r.Atts != 0 {
+ if r.Atts&SUB != 0 {
+ if len(req.URL.Path) >= r.Size {
+ if req.URL.Path[:r.Size] == r.Path {
+ return true
+ }
+ }
+ }
+
+ if ok, _ := r.matchAndParse(req); ok {
+ return true
+ }
+ }
+ if req.URL.Path == r.Path {
+ return true
+ }
+ return false
+}
+
+// Get sets the route method to GET
+func (r *Route) Get() *Route {
+ r.Method = "GET"
+ return r
+}
+
+// Post sets the route method to POST
+func (r *Route) Post() *Route {
+ r.Method = "POST"
+ return r
+}
+
+// Put sets the route method to PUT
+func (r *Route) Put() *Route {
+ r.Method = "PUT"
+ return r
+}
+
+// Delete sets the route method to DELETE
+func (r *Route) Delete() *Route {
+ r.Method = "DELETE"
+ return r
+}
+
+// Head sets the route method to HEAD
+func (r *Route) Head() *Route {
+ r.Method = "HEAD"
+ return r
+}
+
+// Patch sets the route method to PATCH
+func (r *Route) Patch() *Route {
+ r.Method = "PATCH"
+ return r
+}
+
+// Options sets the route method to OPTIONS
+func (r *Route) Options() *Route {
+ r.Method = "OPTIONS"
+ return r
+}
+
+func (r *Route) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if r.Method != "" {
+ if req.Method == r.Method {
+ r.Handler.ServeHTTP(rw, req)
+ return
+ }
+ http.NotFound(rw, req)
+ return
+ }
+ r.Handler.ServeHTTP(rw, req)
+}
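save classifies each path segment by its first byte, which implies a small pattern syntax: ":" marks a named parameter, "#tag^regex$" a tagged regex segment, and "*" a wildcard. A sketch of what NewRoute derives from each form; the printed field values follow from the parsing code above:

```
package main

import (
	"fmt"
	"net/http"

	"github.com/go-zoo/bone"
)

func main() {
	h := http.NotFoundHandler()

	r := bone.NewRoute("/users/:id", h) // ":" -> named parameter
	fmt.Println(r.Pattern[2], r.Atts&bone.PARAM != 0) // "id" true

	w := bone.NewRoute("/static/*", h) // "*" -> wildcard
	fmt.Println(w.Atts&bone.WC != 0) // true

	// "#tag^regex$" -> tagged regex segment; the tag names the variable
	x := bone.NewRoute("/files/#name^[a-z]+$", h)
	fmt.Println(x.Tag[2], x.Atts&bone.REGEX != 0) // "name" true
}
```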
diff --git a/vendor/github.com/godbus/dbus/LICENSE b/vendor/github.com/godbus/dbus/LICENSE
new file mode 100644
index 000000000..670d88fca
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013, Georg Reinke (<guelfey at gmail dot com>), Google
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/godbus/dbus/README.markdown b/vendor/github.com/godbus/dbus/README.markdown
new file mode 100644
index 000000000..d37f4e2ed
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/README.markdown
@@ -0,0 +1,44 @@
+[![Build Status](https://travis-ci.org/godbus/dbus.svg?branch=master)](https://travis-ci.org/godbus/dbus)
+
+dbus
+----
+
+dbus is a simple library that implements native Go client bindings for the
+D-Bus message bus system.
+
+### Features
+
+* Complete native implementation of the D-Bus message protocol
+* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
+* Subpackages that help with the introspection / property interfaces
+
+### Installation
+
+This package requires Go 1.1. If you have installed it and set up your GOPATH, just run:
+
+```
+go get github.com/godbus/dbus
+```
+
+If you want to use the subpackages, you can install them the same way.
+
+### Usage
+
+The complete package documentation and some simple examples are available at
+[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
+[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
+gives a short overview of the basic usage.
+
+#### Projects using godbus
+- [notify](https://github.com/esiqveland/notify) a library that provides desktop notifications over dbus.
+- [go-bluetooth](https://github.com/muka/go-bluetooth) provides a bluetooth client over the bluez dbus API.
+
+Please note that the API is considered unstable for now and may change without
+further notice.
+
+### License
+
+go.dbus is available under the Simplified BSD License; see LICENSE for the full
+text.
+
+Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.
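For orientation, a minimal sketch of the API vendored below: connect to the shared session bus and list the names currently on it. SessionBus, BusObject, Call, and Store all appear in the files that follow; ListNames is a standard bus-daemon method:

```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus() // shared connection; must not be Closed
	if err != nil {
		panic(err)
	}
	var names []string
	err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
```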
diff --git a/vendor/github.com/godbus/dbus/auth.go b/vendor/github.com/godbus/dbus/auth.go
new file mode 100644
index 000000000..98017b693
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/auth.go
@@ -0,0 +1,253 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+)
+
+// AuthStatus represents the Status of an authentication mechanism.
+type AuthStatus byte
+
+const (
+ // AuthOk signals that authentication is finished; the next command
+ // from the server should be an OK.
+ AuthOk AuthStatus = iota
+
+ // AuthContinue signals that additional data is needed; the next command
+ // from the server should be a DATA.
+ AuthContinue
+
+ // AuthError signals an error; the server sent invalid data or some
+ // other unexpected thing happened and the current authentication
+ // process should be aborted.
+ AuthError
+)
+
+type authState byte
+
+const (
+ waitingForData authState = iota
+ waitingForOk
+ waitingForReject
+)
+
+// Auth defines the behaviour of an authentication mechanism.
+type Auth interface {
+ // Return the name of the mechanism, the argument to the first AUTH command
+ // and the next status.
+ FirstData() (name, resp []byte, status AuthStatus)
+
+ // Process the given DATA command, and return the argument to the DATA
+ // command and the next status. If len(resp) == 0, no DATA command is sent.
+ HandleData(data []byte) (resp []byte, status AuthStatus)
+}
+
+// Auth authenticates the connection, trying the given list of authentication
+// mechanisms (in that order). If nil is passed, the EXTERNAL and
+// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
+// connections, this method must be called before sending any messages to the
+// bus. Auth must not be called on shared connections.
+func (conn *Conn) Auth(methods []Auth) error {
+ if methods == nil {
+ uid := strconv.Itoa(os.Getuid())
+ methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
+ }
+ in := bufio.NewReader(conn.transport)
+ err := conn.transport.SendNullByte()
+ if err != nil {
+ return err
+ }
+ err = authWriteLine(conn.transport, []byte("AUTH"))
+ if err != nil {
+ return err
+ }
+ s, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
+ return errors.New("dbus: authentication protocol error")
+ }
+ s = s[1:]
+ for _, v := range s {
+ for _, m := range methods {
+ if name, data, status := m.FirstData(); bytes.Equal(v, name) {
+ var ok bool
+ err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
+ if err != nil {
+ return err
+ }
+ switch status {
+ case AuthOk:
+ err, ok = conn.tryAuth(m, waitingForOk, in)
+ case AuthContinue:
+ err, ok = conn.tryAuth(m, waitingForData, in)
+ default:
+ panic("dbus: invalid authentication status")
+ }
+ if err != nil {
+ return err
+ }
+ if ok {
+ if conn.transport.SupportsUnixFDs() {
+ err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
+ if err != nil {
+ return err
+ }
+ line, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
+ conn.EnableUnixFDs()
+ conn.unixFD = true
+ case bytes.Equal(line[0], []byte("ERROR")):
+ default:
+ return errors.New("dbus: authentication protocol error")
+ }
+ }
+ err = authWriteLine(conn.transport, []byte("BEGIN"))
+ if err != nil {
+ return err
+ }
+ go conn.inWorker()
+ go conn.outWorker()
+ return nil
+ }
+ }
+ }
+ }
+ return errors.New("dbus: authentication failed")
+}
+
+// tryAuth tries to authenticate with m as the mechanism, using state as the
+// initial authState and in for reading input. It returns (nil, true) on
+// success, (nil, false) on a REJECTED and (someErr, false) if some other
+// error occurred.
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
+ for {
+ s, err := authReadLine(in)
+ if err != nil {
+ return err, false
+ }
+ switch {
+ case state == waitingForData && string(s[0]) == "DATA":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ continue
+ }
+ data, status := m.HandleData(s[1])
+ switch status {
+ case AuthOk, AuthContinue:
+ if len(data) != 0 {
+ err = authWriteLine(conn.transport, []byte("DATA"), data)
+ if err != nil {
+ return err, false
+ }
+ }
+ if status == AuthOk {
+ state = waitingForOk
+ }
+ case AuthError:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ }
+ case state == waitingForData && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForData && string(s[0]) == "ERROR":
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForData && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ // wait for the pending REJECTED; this line carries no server UUID to read
+ state = waitingForReject
+ continue
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForData:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForOk && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ // wait for the pending REJECTED; this line carries no server UUID to read
+ state = waitingForReject
+ continue
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForOk && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForOk && (string(s[0]) == "DATA" ||
+ string(s[0]) == "ERROR"):
+
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForOk:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForReject && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForReject:
+ return errors.New("dbus: authentication protocol error"), false
+ default:
+ panic("dbus: invalid auth state")
+ }
+ }
+}
+
+// authReadLine reads a line and separates it into its fields.
+func authReadLine(in *bufio.Reader) ([][]byte, error) {
+ data, err := in.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+ data = bytes.TrimSuffix(data, []byte("\r\n"))
+ return bytes.Split(data, []byte{' '}), nil
+}
+
+// authWriteLine writes the given line in the authentication protocol format
+// (elements of data separated by a " " and terminated by "\r\n").
+func authWriteLine(out io.Writer, data ...[]byte) error {
+ buf := make([]byte, 0)
+ for i, v := range data {
+ buf = append(buf, v...)
+ if i != len(data)-1 {
+ buf = append(buf, ' ')
+ }
+ }
+ buf = append(buf, '\r')
+ buf = append(buf, '\n')
+ n, err := out.Write(buf)
+ if err != nil {
+ return err
+ }
+ if n != len(buf) {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
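authReadLine and authWriteLine implement the textual framing of the D-Bus auth protocol: space-separated fields terminated by CRLF. A standalone sketch of the same framing; writeLine mirrors the unexported authWriteLine above:

```
package main

import (
	"bytes"
	"fmt"
)

// writeLine joins fields with single spaces and terminates with CRLF,
// matching the format authWriteLine produces.
func writeLine(buf *bytes.Buffer, data ...[]byte) {
	for i, v := range data {
		buf.Write(v)
		if i != len(data)-1 {
			buf.WriteByte(' ')
		}
	}
	buf.WriteString("\r\n")
}

func main() {
	var buf bytes.Buffer
	writeLine(&buf, []byte("AUTH"), []byte("EXTERNAL"), []byte("31303030"))
	fmt.Printf("%q\n", buf.String()) // "AUTH EXTERNAL 31303030\r\n"
}
```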
diff --git a/vendor/github.com/godbus/dbus/auth_external.go b/vendor/github.com/godbus/dbus/auth_external.go
new file mode 100644
index 000000000..7e376d3ef
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/auth_external.go
@@ -0,0 +1,26 @@
+package dbus
+
+import (
+ "encoding/hex"
+)
+
+// AuthExternal returns an Auth that authenticates as the given user with the
+// EXTERNAL mechanism.
+func AuthExternal(user string) Auth {
+ return authExternal{user}
+}
+
+// authExternal implements the EXTERNAL authentication mechanism.
+type authExternal struct {
+ user string
+}
+
+func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("EXTERNAL"), b, AuthOk
+}
+
+func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
+ return nil, AuthError
+}
diff --git a/vendor/github.com/godbus/dbus/auth_sha1.go b/vendor/github.com/godbus/dbus/auth_sha1.go
new file mode 100644
index 000000000..df15b4611
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/auth_sha1.go
@@ -0,0 +1,102 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/hex"
+ "os"
+)
+
+// AuthCookieSha1 returns an Auth that authenticates as the given user with the
+// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
+// directory of the user.
+func AuthCookieSha1(user, home string) Auth {
+ return authCookieSha1{user, home}
+}
+
+type authCookieSha1 struct {
+ user, home string
+}
+
+func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
+}
+
+func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
+ challenge := make([]byte, len(data)/2)
+ _, err := hex.Decode(challenge, data)
+ if err != nil {
+ return nil, AuthError
+ }
+ b := bytes.Split(challenge, []byte{' '})
+ if len(b) != 3 {
+ return nil, AuthError
+ }
+ context := b[0]
+ id := b[1]
+ svchallenge := b[2]
+ cookie := a.getCookie(context, id)
+ if cookie == nil {
+ return nil, AuthError
+ }
+ clchallenge := a.generateChallenge()
+ if clchallenge == nil {
+ return nil, AuthError
+ }
+ hash := sha1.New()
+ hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
+ hexhash := make([]byte, 2*hash.Size())
+ hex.Encode(hexhash, hash.Sum(nil))
+ data = append(clchallenge, ' ')
+ data = append(data, hexhash...)
+ resp := make([]byte, 2*len(data))
+ hex.Encode(resp, data)
+ return resp, AuthOk
+}
+
+// getCookie searches for the cookie identified by id in context and returns
+// the cookie content or nil. (Since HandleData can't return a specific error,
+// but only whether an error occurred, this function also doesn't bother to
+// return an error.)
+func (a authCookieSha1) getCookie(context, id []byte) []byte {
+ file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
+ if err != nil {
+ return nil
+ }
+ defer file.Close()
+ rd := bufio.NewReader(file)
+ for {
+ line, err := rd.ReadBytes('\n')
+ if err != nil {
+ return nil
+ }
+ line = line[:len(line)-1]
+ b := bytes.Split(line, []byte{' '})
+ if len(b) != 3 {
+ return nil
+ }
+ if bytes.Equal(b[0], id) {
+ return b[2]
+ }
+ }
+}
+
+// generateChallenge returns a random, hex-encoded challenge, or nil on error
+// (see above).
+func (a authCookieSha1) generateChallenge() []byte {
+ b := make([]byte, 16)
+ n, err := rand.Read(b)
+ if err != nil {
+ return nil
+ }
+ if n != 16 {
+ return nil
+ }
+ enc := make([]byte, 32)
+ hex.Encode(enc, b)
+ return enc
+}
diff --git a/vendor/github.com/godbus/dbus/call.go b/vendor/github.com/godbus/dbus/call.go
new file mode 100644
index 000000000..ba6e73f60
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/call.go
@@ -0,0 +1,36 @@
+package dbus
+
+import (
+ "errors"
+)
+
+// Call represents a pending or completed method call.
+type Call struct {
+ Destination string
+ Path ObjectPath
+ Method string
+ Args []interface{}
+
+ // Strobes when the call is complete.
+ Done chan *Call
+
+ // After completion, the error status. If this is non-nil, it may be an
+ // error message from the peer (with Error as its type) or some other error.
+ Err error
+
+ // Holds the response once the call is done.
+ Body []interface{}
+}
+
+var errSignature = errors.New("dbus: mismatched signature")
+
+// Store stores the body of the reply into the provided pointers. It returns
+// an error if the signatures of the body and retvalues don't match, or if
+// the error status is not nil.
+func (c *Call) Store(retvalues ...interface{}) error {
+ if c.Err != nil {
+ return c.Err
+ }
+
+ return Store(c.Body, retvalues...)
+}
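A sketch of the synchronous pattern Call.Store enables: a method reply's body is unpacked straight into typed variables, and a signature mismatch or error reply surfaces as the returned error. GetId is a standard bus-daemon method; the Call helper on the bus object lives in other files of this package:

```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	var id string
	// GetId replies with a single string; Store checks that the reply
	// signature matches the pointer list and fails otherwise.
	if err := conn.BusObject().Call("org.freedesktop.DBus.GetId", 0).Store(&id); err != nil {
		panic(err)
	}
	fmt.Println(id)
}
```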
diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go
new file mode 100644
index 000000000..5720e2ebb
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn.go
@@ -0,0 +1,683 @@
+package dbus
+
+import (
+ "errors"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+var (
+ systemBus *Conn
+ systemBusLck sync.Mutex
+ sessionBus *Conn
+ sessionBusLck sync.Mutex
+ sessionEnvLck sync.Mutex
+)
+
+// ErrClosed is the error returned by calls on a closed connection.
+var ErrClosed = errors.New("dbus: connection closed by user")
+
+// Conn represents a connection to a message bus (usually, the system or
+// session bus).
+//
+// Connections are either shared or private. Shared connections
+// are shared between calls to the functions that return them. As a result,
+// the methods Close, Auth and Hello must not be called on them.
+//
+// Multiple goroutines may invoke methods on a connection simultaneously.
+type Conn struct {
+ transport
+
+ busObj BusObject
+ unixFD bool
+ uuid string
+
+ names []string
+ namesLck sync.RWMutex
+
+ serialLck sync.Mutex
+ nextSerial uint32
+ serialUsed map[uint32]bool
+
+ calls map[uint32]*Call
+ callsLck sync.RWMutex
+
+ handler Handler
+
+ out chan *Message
+ closed bool
+ outLck sync.RWMutex
+
+ signalHandler SignalHandler
+
+ eavesdropped chan<- *Message
+ eavesdroppedLck sync.Mutex
+}
+
+// SessionBus returns a shared connection to the session bus, connecting to it
+// if not already done.
+func SessionBus() (conn *Conn, err error) {
+ sessionBusLck.Lock()
+ defer sessionBusLck.Unlock()
+ if sessionBus != nil {
+ return sessionBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ sessionBus = conn
+ }
+ }()
+ conn, err = SessionBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+func getSessionBusAddress() (string, error) {
+ sessionEnvLck.Lock()
+ defer sessionEnvLck.Unlock()
+ address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+ if address != "" && address != "autolaunch:" {
+ return address, nil
+ }
+ return getSessionBusPlatformAddress()
+}
+
+// SessionBusPrivate returns a new private connection to the session bus.
+func SessionBusPrivate() (*Conn, error) {
+ address, err := getSessionBusAddress()
+ if err != nil {
+ return nil, err
+ }
+
+ return Dial(address)
+}
+
+// SessionBusPrivateHandler returns a new private connection to the session bus, using the provided handlers.
+func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ address, err := getSessionBusAddress()
+ if err != nil {
+ return nil, err
+ }
+ return DialHandler(address, handler, signalHandler)
+}
+
+// SystemBus returns a shared connection to the system bus, connecting to it if
+// not already done.
+func SystemBus() (conn *Conn, err error) {
+ systemBusLck.Lock()
+ defer systemBusLck.Unlock()
+ if systemBus != nil {
+ return systemBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ systemBus = conn
+ }
+ }()
+ conn, err = SystemBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SystemBusPrivate returns a new private connection to the system bus.
+func SystemBusPrivate() (*Conn, error) {
+ return Dial(getSystemBusPlatformAddress())
+}
+
+// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers.
+func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ return DialHandler(getSystemBusPlatformAddress(), handler, signalHandler)
+}
+
+// Dial establishes a new private connection to the message bus specified by address.
+func Dial(address string) (*Conn, error) {
+ tr, err := getTransport(address)
+ if err != nil {
+ return nil, err
+ }
+ return newConn(tr, NewDefaultHandler(), NewDefaultSignalHandler())
+}
+
+// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers.
+func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ tr, err := getTransport(address)
+ if err != nil {
+ return nil, err
+ }
+ return newConn(tr, handler, signalHandler)
+}
+
+// NewConn creates a new private *Conn from an already established connection.
+func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
+ return NewConnHandler(conn, NewDefaultHandler(), NewDefaultSignalHandler())
+}
+
+// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers.
+func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ return newConn(genericTransport{conn}, handler, signalHandler)
+}
+
+// newConn creates a new *Conn from a transport.
+func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ conn := new(Conn)
+ conn.transport = tr
+ conn.calls = make(map[uint32]*Call)
+ conn.out = make(chan *Message, 10)
+ conn.handler = handler
+ conn.signalHandler = signalHandler
+ conn.nextSerial = 1
+ conn.serialUsed = map[uint32]bool{0: true}
+ conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
+ return conn, nil
+}
+
+// BusObject returns the object owned by the bus daemon which handles
+// administrative requests.
+func (conn *Conn) BusObject() BusObject {
+ return conn.busObj
+}
+
+// Close closes the connection. Any blocked operations will return with errors
+// and the channels passed to Eavesdrop and Signal are closed. This method must
+// not be called on shared connections.
+func (conn *Conn) Close() error {
+ conn.outLck.Lock()
+ if conn.closed {
+ // inWorker calls Close on read error, the read error may
+ // be caused by another caller calling Close to shutdown the
+ // dbus connection, a double-close scenario we prevent here.
+ conn.outLck.Unlock()
+ return nil
+ }
+ close(conn.out)
+ conn.closed = true
+ conn.outLck.Unlock()
+
+ if term, ok := conn.signalHandler.(Terminator); ok {
+ term.Terminate()
+ }
+
+ if term, ok := conn.handler.(Terminator); ok {
+ term.Terminate()
+ }
+
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ close(conn.eavesdropped)
+ }
+ conn.eavesdroppedLck.Unlock()
+
+ return conn.transport.Close()
+}
+
+// Eavesdrop causes conn to send all incoming messages to the given channel
+// without further processing. Method replies, errors and signals will not be
+// sent to the appropriate channels and method calls will not be handled. If nil
+// is passed, the normal behaviour is restored.
+//
+// The caller has to make sure that ch is sufficiently buffered;
+// if a message arrives when a write to ch is not possible, the message is
+// discarded.
+func (conn *Conn) Eavesdrop(ch chan<- *Message) {
+ conn.eavesdroppedLck.Lock()
+ conn.eavesdropped = ch
+ conn.eavesdroppedLck.Unlock()
+}
+
+// getSerial returns an unused serial.
+func (conn *Conn) getSerial() uint32 {
+ conn.serialLck.Lock()
+ defer conn.serialLck.Unlock()
+ n := conn.nextSerial
+ for conn.serialUsed[n] {
+ n++
+ }
+ conn.serialUsed[n] = true
+ conn.nextSerial = n + 1
+ return n
+}
+
+// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
+// called after authentication, but before sending any other messages to the
+// bus. Hello must not be called for shared connections.
+func (conn *Conn) Hello() error {
+ var s string
+ err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
+ if err != nil {
+ return err
+ }
+ conn.namesLck.Lock()
+ conn.names = make([]string, 1)
+ conn.names[0] = s
+ conn.namesLck.Unlock()
+ return nil
+}
+
+// inWorker runs in its own goroutine, reading incoming messages from the
+// transport and dispatching them appropriately.
+func (conn *Conn) inWorker() {
+ for {
+ msg, err := conn.ReadMessage()
+ if err == nil {
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ select {
+ case conn.eavesdropped <- msg:
+ default:
+ }
+ conn.eavesdroppedLck.Unlock()
+ continue
+ }
+ conn.eavesdroppedLck.Unlock()
+ dest, _ := msg.Headers[FieldDestination].value.(string)
+ found := false
+ if dest == "" {
+ found = true
+ } else {
+ conn.namesLck.RLock()
+ if len(conn.names) == 0 {
+ found = true
+ }
+ for _, v := range conn.names {
+ if dest == v {
+ found = true
+ break
+ }
+ }
+ conn.namesLck.RUnlock()
+ }
+ if !found {
+ // Eavesdropped a message, but no channel for it is registered.
+ // Ignore it.
+ continue
+ }
+ switch msg.Type {
+ case TypeMethodReply, TypeError:
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ conn.callsLck.Lock()
+ if c, ok := conn.calls[serial]; ok {
+ if msg.Type == TypeError {
+ name, _ := msg.Headers[FieldErrorName].value.(string)
+ c.Err = Error{name, msg.Body}
+ } else {
+ c.Body = msg.Body
+ }
+ c.Done <- c
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, serial)
+ conn.serialLck.Unlock()
+ delete(conn.calls, serial)
+ }
+ conn.callsLck.Unlock()
+ case TypeSignal:
+ iface := msg.Headers[FieldInterface].value.(string)
+ member := msg.Headers[FieldMember].value.(string)
+ // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+ // sender is optional for signals.
+ sender, _ := msg.Headers[FieldSender].value.(string)
+ if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
+ if member == "NameLost" {
+ // If we lost the name on the bus, remove it from our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the lost name")
+ }
+ conn.namesLck.Lock()
+ for i, v := range conn.names {
+ if v == name {
+ conn.names = append(conn.names[:i],
+ conn.names[i+1:]...)
+ }
+ }
+ conn.namesLck.Unlock()
+ } else if member == "NameAcquired" {
+ // If we acquired the name on the bus, add it to our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the acquired name")
+ }
+ conn.namesLck.Lock()
+ conn.names = append(conn.names, name)
+ conn.namesLck.Unlock()
+ }
+ }
+ conn.handleSignal(msg)
+ case TypeMethodCall:
+ go conn.handleCall(msg)
+ }
+ } else if _, ok := err.(InvalidMessageError); !ok {
+ // Some read error occurred (usually EOF); we can't really do
+ // anything but shut everything down and return errors to all
+ // pending replies.
+ conn.Close()
+ conn.callsLck.RLock()
+ for _, v := range conn.calls {
+ v.Err = err
+ v.Done <- v
+ }
+ conn.callsLck.RUnlock()
+ return
+ }
+ // invalid messages are ignored
+ }
+}
+
+func (conn *Conn) handleSignal(msg *Message) {
+ iface := msg.Headers[FieldInterface].value.(string)
+ member := msg.Headers[FieldMember].value.(string)
+ // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+ // sender is optional for signals.
+ sender, _ := msg.Headers[FieldSender].value.(string)
+ signal := &Signal{
+ Sender: sender,
+ Path: msg.Headers[FieldPath].value.(ObjectPath),
+ Name: iface + "." + member,
+ Body: msg.Body,
+ }
+ conn.signalHandler.DeliverSignal(iface, member, signal)
+}
+
+// Names returns the list of all names that are currently owned by this
+// connection. The slice is always at least one element long, the first element
+// being the unique name of the connection.
+func (conn *Conn) Names() []string {
+ conn.namesLck.RLock()
+ // copy the slice so it can't be modified
+ s := make([]string, len(conn.names))
+ copy(s, conn.names)
+ conn.namesLck.RUnlock()
+ return s
+}
+
+// Object returns the object identified by the given destination name and path.
+func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
+ return &Object{conn, dest, path}
+}
+
+// outWorker runs in its own goroutine, encoding and sending messages that are
+// sent to conn.out.
+func (conn *Conn) outWorker() {
+ for msg := range conn.out {
+ err := conn.SendMessage(msg)
+ conn.callsLck.RLock()
+ if err != nil {
+ if c := conn.calls[msg.serial]; c != nil {
+ c.Err = err
+ c.Done <- c
+ }
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ } else if msg.Type != TypeMethodCall {
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ }
+ conn.callsLck.RUnlock()
+ }
+}
+
+// Send sends the given message to the message bus. You usually don't need to
+// use this; use the higher-level equivalents (Call / Go, Emit and Export)
+// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
+// call is returned and the same value is sent to ch (which must be buffered)
+// once the call is complete. Otherwise, ch is ignored and a Call structure is
+// returned of which only the Err member is valid.
+func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
+ var call *Call
+
+ msg.serial = conn.getSerial()
+ if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 5)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Conn).Send")
+ }
+ call = new(Call)
+ call.Destination, _ = msg.Headers[FieldDestination].value.(string)
+ call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
+ iface, _ := msg.Headers[FieldInterface].value.(string)
+ member, _ := msg.Headers[FieldMember].value.(string)
+ call.Method = iface + "." + member
+ call.Args = msg.Body
+ call.Done = ch
+ conn.callsLck.Lock()
+ conn.calls[msg.serial] = call
+ conn.callsLck.Unlock()
+ conn.outLck.RLock()
+ if conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+ } else {
+ conn.outLck.RLock()
+ if conn.closed {
+ call = &Call{Err: ErrClosed}
+ } else {
+ conn.out <- msg
+ call = &Call{Err: nil}
+ }
+ conn.outLck.RUnlock()
+ }
+ return call
+}
+
+// sendError creates an error message corresponding to the parameters and sends
+// it to conn.out.
+func (conn *Conn) sendError(err error, dest string, serial uint32) {
+ var e *Error
+ switch em := err.(type) {
+ case Error:
+ e = &em
+ case *Error:
+ e = em
+ case DBusError:
+ name, body := em.DBusError()
+ e = NewError(name, body)
+ default:
+ e = MakeFailedError(err)
+ }
+ msg := new(Message)
+ msg.Type = TypeError
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldErrorName] = MakeVariant(e.Name)
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = e.Body
+ if len(e.Body) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+// sendReply creates a method reply message corresponding to the parameters and
+// sends it to conn.out.
+func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
+ msg := new(Message)
+ msg.Type = TypeMethodReply
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) {
+ if !isDefaultSignalHandler(conn.signalHandler) {
+ return
+ }
+ handler := conn.signalHandler.(*defaultSignalHandler)
+ fn(handler, ch)
+}
+
+// Signal registers the given channel to be passed all received signal messages.
+// The caller has to make sure that ch is sufficiently buffered; if a message
+// arrives when a write to ch is not possible, it is discarded.
+//
+// Multiple of these channels can be registered at the same time.
+//
+// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
+// channel for eavesdropped messages, this channel receives all signals, and
+// none of the channels passed to Signal will receive any signals.
+func (conn *Conn) Signal(ch chan<- *Signal) {
+ conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch)
+}
+
+// RemoveSignal removes the given channel from the list of the registered channels.
+func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
+ conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch)
+}
+
+// SupportsUnixFDs returns whether the underlying transport supports passing of
+// unix file descriptors. If this is false, method calls containing unix file
+// descriptors will return an error and emitted signals containing them will
+// not be sent.
+func (conn *Conn) SupportsUnixFDs() bool {
+ return conn.unixFD
+}
+
+// Error represents a D-Bus message of type Error.
+type Error struct {
+ Name string
+ Body []interface{}
+}
+
+func NewError(name string, body []interface{}) *Error {
+ return &Error{name, body}
+}
+
+func (e Error) Error() string {
+ if len(e.Body) >= 1 {
+ s, ok := e.Body[0].(string)
+ if ok {
+ return s
+ }
+ }
+ return e.Name
+}
+
+// Signal represents a D-Bus message of type Signal. The name member is given in
+// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
+type Signal struct {
+ Sender string
+ Path ObjectPath
+ Name string
+ Body []interface{}
+}
+
+// transport is a D-Bus transport.
+type transport interface {
+ // Read and Write raw data (for example, for the authentication protocol).
+ io.ReadWriteCloser
+
+ // Send the initial null byte used for the EXTERNAL mechanism.
+ SendNullByte() error
+
+ // Returns whether this transport supports passing Unix FDs.
+ SupportsUnixFDs() bool
+
+ // Signal the transport that Unix FD passing is enabled for this connection.
+ EnableUnixFDs()
+
+ // Read / send a message, handling things like Unix FDs.
+ ReadMessage() (*Message, error)
+ SendMessage(*Message) error
+}
+
+var (
+ transports = make(map[string]func(string) (transport, error))
+)
+
+func getTransport(address string) (transport, error) {
+ var err error
+ var t transport
+
+ addresses := strings.Split(address, ";")
+ for _, v := range addresses {
+ i := strings.IndexRune(v, ':')
+ if i == -1 {
+ err = errors.New("dbus: invalid bus address (no transport)")
+ continue
+ }
+ f := transports[v[:i]]
+ if f == nil {
+ err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
+ continue
+ }
+ t, err = f(v[i+1:])
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, err
+}
+
+// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
+// of arbitrary types, contains the values that are obtained from dereferencing
+// all elements in vs.
+func dereferenceAll(vs []interface{}) []interface{} {
+ for i := range vs {
+ v := reflect.ValueOf(vs[i])
+ v = v.Elem()
+ vs[i] = v.Interface()
+ }
+ return vs
+}
+
+// getKey gets a key from the list of keys. Returns "" on error or if not found.
+func getKey(s, key string) string {
+ for _, keyEqualsValue := range strings.Split(s, ",") {
+ keyValue := strings.SplitN(keyEqualsValue, "=", 2)
+ if len(keyValue) == 2 && keyValue[0] == key {
+ return keyValue[1]
+ }
+ }
+ return ""
+}
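A sketch of the signal plumbing defined above: register a buffered channel with Signal and range over it. To actually receive broadcasts you would normally also install a match rule via the bus object, which is out of scope for this sketch:

```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	ch := make(chan *dbus.Signal, 10) // must be buffered or signals are dropped
	conn.Signal(ch)
	for sig := range ch {
		fmt.Println(sig.Sender, sig.Name, sig.Body)
	}
}
```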
diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go
new file mode 100644
index 000000000..c015f80ce
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn_darwin.go
@@ -0,0 +1,33 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+)
+
+const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_socket"
+
+func getSessionBusPlatformAddress() (string, error) {
+ cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return "", err
+ }
+
+ if len(b) == 0 {
+ return "", errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return "unix:path=" + string(b[:len(b)-1]), nil
+}
+
+func getSystemBusPlatformAddress() string {
+ address := os.Getenv("DBUS_LAUNCHD_SESSION_BUS_SOCKET")
+ if address != "" {
+ return fmt.Sprintf("unix:path=%s", address)
+ }
+ return defaultSystemBusAddress
+}
diff --git a/vendor/github.com/godbus/dbus/conn_other.go b/vendor/github.com/godbus/dbus/conn_other.go
new file mode 100644
index 000000000..254c9f2ef
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn_other.go
@@ -0,0 +1,42 @@
+// +build !darwin
+
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+)
+
+const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
+
+func getSessionBusPlatformAddress() (string, error) {
+ cmd := exec.Command("dbus-launch")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return "", err
+ }
+
+ i := bytes.IndexByte(b, '=')
+ j := bytes.IndexByte(b, '\n')
+
+ if i == -1 || j == -1 {
+ return "", errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ env, addr := string(b[0:i]), string(b[i+1:j])
+ os.Setenv(env, addr)
+
+ return addr, nil
+}
+
+func getSystemBusPlatformAddress() string {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return fmt.Sprintf("unix:path=%s", address)
+ }
+ return defaultSystemBusAddress
+}
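The non-darwin lookup above slices the first KEY=value line out of dbus-launch's output: everything before the first '=' is the environment variable name, everything up to the first newline is the address. A sketch with an illustrative output string:

```
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Shape of dbus-launch output parsed above (illustrative value).
	b := []byte("DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-X,guid=abc\n")
	i := bytes.IndexByte(b, '=')
	j := bytes.IndexByte(b, '\n')
	env, addr := string(b[:i]), string(b[i+1:j])
	fmt.Println(env, addr)
}
```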
diff --git a/vendor/github.com/godbus/dbus/dbus.go b/vendor/github.com/godbus/dbus/dbus.go
new file mode 100644
index 000000000..c6d0d3ce0
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/dbus.go
@@ -0,0 +1,427 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ byteType = reflect.TypeOf(byte(0))
+ boolType = reflect.TypeOf(false)
+ uint8Type = reflect.TypeOf(uint8(0))
+ int16Type = reflect.TypeOf(int16(0))
+ uint16Type = reflect.TypeOf(uint16(0))
+ intType = reflect.TypeOf(int(0))
+ uintType = reflect.TypeOf(uint(0))
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf("")
+ signatureType = reflect.TypeOf(Signature{""})
+ objectPathType = reflect.TypeOf(ObjectPath(""))
+ variantType = reflect.TypeOf(Variant{Signature{""}, nil})
+ interfacesType = reflect.TypeOf([]interface{}{})
+ interfaceType = reflect.TypeOf((*interface{})(nil)).Elem()
+ unixFDType = reflect.TypeOf(UnixFD(0))
+ unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
+)
+
+// An InvalidTypeError signals that a value which cannot be represented in the
+// D-Bus wire format was passed to a function.
+type InvalidTypeError struct {
+ Type reflect.Type
+}
+
+func (e InvalidTypeError) Error() string {
+ return "dbus: invalid type " + e.Type.String()
+}
+
+// Store copies the values contained in src to dest, which must be a slice of
+// pointers. It converts slices of interfaces from src to corresponding structs
+// in dest. An error is returned if the lengths of src and dest or the types of
+// their elements don't match.
+func Store(src []interface{}, dest ...interface{}) error {
+ if len(src) != len(dest) {
+ return errors.New("dbus.Store: length mismatch")
+ }
+
+ for i := range src {
+ if err := storeInterfaces(src[i], dest[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func storeInterfaces(src, dest interface{}) error {
+ return store(reflect.ValueOf(dest), reflect.ValueOf(src))
+}
+
+func store(dest, src reflect.Value) error {
+ if dest.Kind() == reflect.Ptr {
+ return store(dest.Elem(), src)
+ }
+ switch src.Kind() {
+ case reflect.Slice:
+ return storeSlice(dest, src)
+ case reflect.Map:
+ return storeMap(dest, src)
+ default:
+ return storeBase(dest, src)
+ }
+}
+
+func storeBase(dest, src reflect.Value) error {
+ return setDest(dest, src)
+}
+
+func setDest(dest, src reflect.Value) error {
+ if !isVariant(src.Type()) && isVariant(dest.Type()) {
+ //special conversion for dbus.Variant
+ dest.Set(reflect.ValueOf(MakeVariant(src.Interface())))
+ return nil
+ }
+ if isVariant(src.Type()) && !isVariant(dest.Type()) {
+ src = getVariantValue(src)
+ }
+ if !src.Type().ConvertibleTo(dest.Type()) {
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: cannot convert %s to %s",
+ src.Type(), dest.Type())
+ }
+ dest.Set(src.Convert(dest.Type()))
+ return nil
+}
+
+func kindsAreCompatible(dest, src reflect.Type) bool {
+ switch {
+ case isVariant(dest):
+ return true
+ case dest.Kind() == reflect.Interface:
+ return true
+ default:
+ return dest.Kind() == src.Kind()
+ }
+}
+
+func isConvertibleTo(dest, src reflect.Type) bool {
+ switch {
+ case isVariant(dest):
+ return true
+ case dest.Kind() == reflect.Interface:
+ return true
+ case dest.Kind() == reflect.Slice:
+ return src.Kind() == reflect.Slice &&
+ isConvertibleTo(dest.Elem(), src.Elem())
+ case dest.Kind() == reflect.Struct:
+ return src == interfacesType
+ default:
+ return src.ConvertibleTo(dest)
+ }
+}
+
+func storeMap(dest, src reflect.Value) error {
+ switch {
+ case !kindsAreCompatible(dest.Type(), src.Type()):
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "map: cannot store a value of %s into %s",
+ src.Type(), dest.Type())
+ case isVariant(dest.Type()):
+ return storeMapIntoVariant(dest, src)
+ case dest.Kind() == reflect.Interface:
+ return storeMapIntoInterface(dest, src)
+ case isConvertibleTo(dest.Type().Key(), src.Type().Key()) &&
+ isConvertibleTo(dest.Type().Elem(), src.Type().Elem()):
+ return storeMapIntoMap(dest, src)
+ default:
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "map: cannot convert a value of %s into %s",
+ src.Type(), dest.Type())
+ }
+}
+
+func storeMapIntoVariant(dest, src reflect.Value) error {
+ dv := reflect.MakeMap(src.Type())
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeMapIntoInterface(dest, src reflect.Value) error {
+ var dv reflect.Value
+ if isVariant(src.Type().Elem()) {
+ //Convert variants to interface{} recursively when converting
+ //to interface{}
+ dv = reflect.MakeMap(
+ reflect.MapOf(src.Type().Key(), interfaceType))
+ } else {
+ dv = reflect.MakeMap(src.Type())
+ }
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeMapIntoMap(dest, src reflect.Value) error {
+ if dest.IsNil() {
+ dest.Set(reflect.MakeMap(dest.Type()))
+ }
+ keys := src.MapKeys()
+ for _, key := range keys {
+ dkey := key.Convert(dest.Type().Key())
+ dval := reflect.New(dest.Type().Elem()).Elem()
+ err := store(dval, getVariantValue(src.MapIndex(key)))
+ if err != nil {
+ return err
+ }
+ dest.SetMapIndex(dkey, dval)
+ }
+ return nil
+}
+
+func storeSlice(dest, src reflect.Value) error {
+ switch {
+ case src.Type() == interfacesType && dest.Kind() == reflect.Struct:
+ //The decoder always decodes structs as slices of interface{}
+ return storeStruct(dest, src)
+ case !kindsAreCompatible(dest.Type(), src.Type()):
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "slice: cannot store a value of %s into %s",
+ src.Type(), dest.Type())
+ case isVariant(dest.Type()):
+ return storeSliceIntoVariant(dest, src)
+ case dest.Kind() == reflect.Interface:
+ return storeSliceIntoInterface(dest, src)
+ case isConvertibleTo(dest.Type().Elem(), src.Type().Elem()):
+ return storeSliceIntoSlice(dest, src)
+ default:
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "slice: cannot convert a value of %s into %s",
+ src.Type(), dest.Type())
+ }
+}
+
+func storeStruct(dest, src reflect.Value) error {
+ if isVariant(dest.Type()) {
+ return storeBase(dest, src)
+ }
+ dval := make([]interface{}, 0, dest.NumField())
+ dtype := dest.Type()
+ for i := 0; i < dest.NumField(); i++ {
+ field := dest.Field(i)
+ ftype := dtype.Field(i)
+ if ftype.PkgPath != "" {
+ continue
+ }
+ if ftype.Tag.Get("dbus") == "-" {
+ continue
+ }
+ dval = append(dval, field.Addr().Interface())
+ }
+ if src.Len() != len(dval) {
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "destination struct does not have "+
+ "enough fields need: %d have: %d",
+ src.Len(), len(dval))
+ }
+ return Store(src.Interface().([]interface{}), dval...)
+}
+
+func storeSliceIntoVariant(dest, src reflect.Value) error {
+ dv := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeSliceIntoInterface(dest, src reflect.Value) error {
+ var dv reflect.Value
+ if isVariant(src.Type().Elem()) {
+ //Convert variants to interface{} recursively when converting
+ //to interface{}
+ dv = reflect.MakeSlice(reflect.SliceOf(interfaceType),
+ src.Len(), src.Cap())
+ } else {
+ dv = reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ }
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeSliceIntoSlice(dest, src reflect.Value) error {
+ if dest.IsNil() || dest.Len() < src.Len() {
+ dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap()))
+ }
+ if dest.Len() != src.Len() {
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "slices are different lengths "+
+ "need: %d have: %d",
+ src.Len(), dest.Len())
+ }
+ for i := 0; i < src.Len(); i++ {
+ err := store(dest.Index(i), getVariantValue(src.Index(i)))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func getVariantValue(in reflect.Value) reflect.Value {
+ if isVariant(in.Type()) {
+ return reflect.ValueOf(in.Interface().(Variant).Value())
+ }
+ return in
+}
+
+func isVariant(t reflect.Type) bool {
+ return t == variantType
+}
+
+// An ObjectPath is an object path as defined by the D-Bus spec.
+type ObjectPath string
+
+// IsValid returns whether the object path is valid.
+func (o ObjectPath) IsValid() bool {
+ s := string(o)
+ if len(s) == 0 {
+ return false
+ }
+ if s[0] != '/' {
+ return false
+ }
+ if s[len(s)-1] == '/' && len(s) != 1 {
+ return false
+ }
+ // probably not used, but technically possible
+ if s == "/" {
+ return true
+ }
+ split := strings.Split(s[1:], "/")
+ for _, v := range split {
+ if len(v) == 0 {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
+// documentation for more information about Unix file descriptor passing.
+type UnixFD int32
+
+// A UnixFDIndex is the representation of a Unix file descriptor in a message.
+type UnixFDIndex uint32
+
+// alignment returns the alignment of values of type t.
+func alignment(t reflect.Type) int {
+ switch t {
+ case variantType:
+ return 1
+ case objectPathType:
+ return 4
+ case signatureType:
+ return 1
+ case interfacesType:
+ return 4
+ }
+ switch t.Kind() {
+ case reflect.Uint8:
+ return 1
+ case reflect.Uint16, reflect.Int16:
+ return 2
+ case reflect.Uint, reflect.Int, reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
+ return 4
+ case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
+ return 8
+ case reflect.Ptr:
+ return alignment(t.Elem())
+ }
+ return 1
+}
+
+// isKeyType returns whether t is a valid type for a D-Bus dict.
+func isKeyType(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
+ reflect.String, reflect.Uint, reflect.Int:
+
+ return true
+ }
+ return false
+}
+
+// isValidInterface returns whether s is a valid name for an interface.
+func isValidInterface(s string) bool {
+ if len(s) == 0 || len(s) > 255 || s[0] == '.' {
+ return false
+ }
+ elem := strings.Split(s, ".")
+ if len(elem) < 2 {
+ return false
+ }
+ for _, v := range elem {
+ if len(v) == 0 {
+ return false
+ }
+ if v[0] >= '0' && v[0] <= '9' {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// isValidMember returns whether s is a valid name for a member.
+func isValidMember(s string) bool {
+ if len(s) == 0 || len(s) > 255 {
+ return false
+ }
+ i := strings.Index(s, ".")
+ if i != -1 {
+ return false
+ }
+ if s[0] >= '0' && s[0] <= '9' {
+ return false
+ }
+ for _, c := range s {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isMemberChar(c rune) bool {
+ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') || c == '_'
+}
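Store's slice-to-struct path is the piece most callers meet: the decoder yields D-Bus structs as []interface{}, and Store maps those elements onto the exported fields of a destination struct in order. A sketch:

```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

type point struct{ X, Y int32 }

func main() {
	// One reply value that is itself a D-Bus struct (decoded as []interface{}).
	src := []interface{}{[]interface{}{int32(1), int32(2)}}
	var p point
	if err := dbus.Store(src, &p); err != nil {
		panic(err)
	}
	fmt.Println(p) // {1 2}
}
```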
diff --git a/vendor/github.com/godbus/dbus/decoder.go b/vendor/github.com/godbus/dbus/decoder.go
new file mode 100644
index 000000000..ef50dcab9
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/decoder.go
@@ -0,0 +1,228 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+type decoder struct {
+ in io.Reader
+ order binary.ByteOrder
+ pos int
+}
+
+// newDecoder returns a new decoder that reads values from in. The input is
+// expected to be in the given byte order.
+func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
+ dec := new(decoder)
+ dec.in = in
+ dec.order = order
+ return dec
+}
+
+// align aligns the input to the given boundary and panics on error.
+func (dec *decoder) align(n int) {
+ if dec.pos%n != 0 {
+ newpos := (dec.pos + n - 1) & ^(n - 1)
+ empty := make([]byte, newpos-dec.pos)
+ if _, err := io.ReadFull(dec.in, empty); err != nil {
+ panic(err)
+ }
+ dec.pos = newpos
+ }
+}
+
+// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
+func (dec *decoder) binread(v interface{}) {
+ if err := binary.Read(dec.in, dec.order, v); err != nil {
+ panic(err)
+ }
+}
+
+func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
+ defer func() {
+ var ok bool
+ v := recover()
+ if err, ok = v.(error); ok {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ err = FormatError("unexpected EOF")
+ }
+ }
+ }()
+ vs = make([]interface{}, 0)
+ s := sig.str
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ return nil, err
+ }
+ v := dec.decode(s[:len(s)-len(rem)], 0)
+ vs = append(vs, v)
+ s = rem
+ }
+ return vs, nil
+}
+
+func (dec *decoder) decode(s string, depth int) interface{} {
+ dec.align(alignment(typeFor(s)))
+ switch s[0] {
+ case 'y':
+ var b [1]byte
+ if _, err := dec.in.Read(b[:]); err != nil {
+ panic(err)
+ }
+ dec.pos++
+ return b[0]
+ case 'b':
+ i := dec.decode("u", depth).(uint32)
+ switch {
+ case i == 0:
+ return false
+ case i == 1:
+ return true
+ default:
+ panic(FormatError("invalid value for boolean"))
+ }
+ case 'n':
+ var i int16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'i':
+ var i int32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 'x':
+ var i int64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'q':
+ var i uint16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'u':
+ var i uint32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 't':
+ var i uint64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'd':
+ var f float64
+ dec.binread(&f)
+ dec.pos += 8
+ return f
+ case 's':
+ length := dec.decode("u", depth).(uint32)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ return string(b[:len(b)-1])
+ case 'o':
+ return ObjectPath(dec.decode("s", depth).(string))
+ case 'g':
+ length := dec.decode("y", depth).(byte)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ sig, err := ParseSignature(string(b[:len(b)-1]))
+ if err != nil {
+ panic(err)
+ }
+ return sig
+ case 'v':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ var variant Variant
+ sig := dec.decode("g", depth).(Signature)
+ if len(sig.str) == 0 {
+ panic(FormatError("variant signature is empty"))
+ }
+ err, rem := validSingle(sig.str, 0)
+ if err != nil {
+ panic(err)
+ }
+ if rem != "" {
+ panic(FormatError("variant signature has multiple types"))
+ }
+ variant.sig = sig
+ variant.value = dec.decode(sig.str, depth+1)
+ return variant
+ case 'h':
+ return UnixFDIndex(dec.decode("u", depth).(uint32))
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ ksig := s[2:3]
+ vsig := s[3 : len(s)-1]
+ v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ length := dec.decode("u", depth).(uint32)
+ // Even for empty maps, the correct padding must be included
+ dec.align(8)
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ dec.align(8)
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ kv := dec.decode(ksig, depth+2)
+ vv := dec.decode(vsig, depth+2)
+ v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return v.Interface()
+ }
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ length := dec.decode("u", depth).(uint32)
+ v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
+ // Even for empty arrays, the correct padding must be included
+ dec.align(alignment(typeFor(s[1:])))
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ ev := dec.decode(s[1:], depth+1)
+ v = reflect.Append(v, reflect.ValueOf(ev))
+ }
+ return v.Interface()
+ case '(':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ dec.align(8)
+ v := make([]interface{}, 0)
+ s = s[1 : len(s)-1]
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+ ev := dec.decode(s[:len(s)-len(rem)], depth+1)
+ v = append(v, ev)
+ s = rem
+ }
+ return v
+ default:
+ panic(SignatureError{Sig: s})
+ }
+}
+
+// A FormatError is an error in the wire format.
+type FormatError string
+
+func (e FormatError) Error() string {
+ return "dbus: wire format error: " + string(e)
+}
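
The alignment arithmetic used by decoder.align (and by the encoder later in this change) rounds a position up to the next power-of-two boundary with (pos + n - 1) & ^(n - 1). A small self-contained sketch of that computation; alignUp is a hypothetical helper name:

    package main

    import "fmt"

    // alignUp rounds pos up to the next multiple of n (n a power of two),
    // mirroring (pos + n - 1) & ^(n - 1) in decoder.align and encoder.padding.
    func alignUp(pos, n int) int {
        return (pos + n - 1) &^ (n - 1)
    }

    func main() {
        fmt.Println(alignUp(1, 4))  // 4: a uint32 after a single byte starts at offset 4
        fmt.Println(alignUp(13, 8)) // 16: a struct after 13 bytes starts at offset 16
        fmt.Println(alignUp(8, 8))  // 8: already-aligned positions are unchanged
    }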
diff --git a/vendor/github.com/godbus/dbus/default_handler.go b/vendor/github.com/godbus/dbus/default_handler.go
new file mode 100644
index 000000000..e81f73ac5
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/default_handler.go
@@ -0,0 +1,291 @@
+package dbus
+
+import (
+ "bytes"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+func newIntrospectIntf(h *defaultHandler) *exportedIntf {
+ methods := make(map[string]Method)
+ methods["Introspect"] = exportedMethod{
+ reflect.ValueOf(func(msg Message) (string, *Error) {
+ path := msg.Headers[FieldPath].value.(ObjectPath)
+ return h.introspectPath(path), nil
+ }),
+ }
+ return newExportedIntf(methods, true)
+}
+
+// NewDefaultHandler returns an instance of the default
+// call handler. This is useful if you want to implement only
+// one of the two handlers but not both.
+func NewDefaultHandler() *defaultHandler {
+ h := &defaultHandler{
+ objects: make(map[ObjectPath]*exportedObj),
+ defaultIntf: make(map[string]*exportedIntf),
+ }
+ h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h)
+ return h
+}
+
+type defaultHandler struct {
+ sync.RWMutex
+ objects map[ObjectPath]*exportedObj
+ defaultIntf map[string]*exportedIntf
+}
+
+func (h *defaultHandler) PathExists(path ObjectPath) bool {
+ _, ok := h.objects[path]
+ return ok
+}
+
+func (h *defaultHandler) introspectPath(path ObjectPath) string {
+ subpath := make(map[string]struct{})
+ var xml bytes.Buffer
+ xml.WriteString("<node>")
+ for obj := range h.objects {
+ p := string(path)
+ if p != "/" {
+ p += "/"
+ }
+ if strings.HasPrefix(string(obj), p) {
+ nodeName := strings.Split(string(obj[len(p):]), "/")[0]
+ subpath[nodeName] = struct{}{}
+ }
+ }
+ for s := range subpath {
+ xml.WriteString("\n\t<node name=\"" + s + "\"/>")
+ }
+ xml.WriteString("\n</node>")
+ return xml.String()
+}
+
+func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) {
+ h.RLock()
+ defer h.RUnlock()
+ object, ok := h.objects[path]
+ if ok {
+ return object, ok
+ }
+
+ // If an object wasn't found for this exact path,
+ // look for a matching subtree registration
+ subtreeObject := newExportedObject()
+ path = path[:strings.LastIndex(string(path), "/")]
+ for len(path) > 0 {
+ object, ok = h.objects[path]
+ if ok {
+ for name, iface := range object.interfaces {
+ // Only include this handler if it registered for the subtree
+ if iface.isFallbackInterface() {
+ subtreeObject.interfaces[name] = iface
+ }
+ }
+ break
+ }
+
+ path = path[:strings.LastIndex(string(path), "/")]
+ }
+
+ for name, intf := range h.defaultIntf {
+ if _, exists := subtreeObject.interfaces[name]; exists {
+ continue
+ }
+ subtreeObject.interfaces[name] = intf
+ }
+
+ return subtreeObject, true
+}
+
+func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) {
+ h.Lock()
+ h.objects[path] = object
+ h.Unlock()
+}
+
+func (h *defaultHandler) DeleteObject(path ObjectPath) {
+ h.Lock()
+ delete(h.objects, path)
+ h.Unlock()
+}
+
+type exportedMethod struct {
+ reflect.Value
+}
+
+func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) {
+ t := m.Type()
+
+ params := make([]reflect.Value, len(args))
+ for i := 0; i < len(args); i++ {
+ params[i] = reflect.ValueOf(args[i]).Elem()
+ }
+
+ ret := m.Value.Call(params)
+
+ err := ret[t.NumOut()-1].Interface().(*Error)
+ ret = ret[:t.NumOut()-1]
+ out := make([]interface{}, len(ret))
+ for i, val := range ret {
+ out[i] = val.Interface()
+ }
+ if err == nil {
+ // concrete type to interface nil is a special case
+ return out, nil
+ }
+ return out, err
+}
+
+func (m exportedMethod) NumArguments() int {
+ return m.Value.Type().NumIn()
+}
+
+func (m exportedMethod) ArgumentValue(i int) interface{} {
+ return reflect.Zero(m.Type().In(i)).Interface()
+}
+
+func (m exportedMethod) NumReturns() int {
+ return m.Value.Type().NumOut()
+}
+
+func (m exportedMethod) ReturnValue(i int) interface{} {
+ return reflect.Zero(m.Type().Out(i)).Interface()
+}
+
+func newExportedObject() *exportedObj {
+ return &exportedObj{
+ interfaces: make(map[string]*exportedIntf),
+ }
+}
+
+type exportedObj struct {
+ interfaces map[string]*exportedIntf
+}
+
+func (obj *exportedObj) LookupInterface(name string) (Interface, bool) {
+ if name == "" {
+ return obj, true
+ }
+ intf, exists := obj.interfaces[name]
+ return intf, exists
+}
+
+func (obj *exportedObj) AddInterface(name string, iface *exportedIntf) {
+ obj.interfaces[name] = iface
+}
+
+func (obj *exportedObj) DeleteInterface(name string) {
+ delete(obj.interfaces, name)
+}
+
+func (obj *exportedObj) LookupMethod(name string) (Method, bool) {
+ for _, intf := range obj.interfaces {
+ method, exists := intf.LookupMethod(name)
+ if exists {
+ return method, exists
+ }
+ }
+ return nil, false
+}
+
+func (obj *exportedObj) isFallbackInterface() bool {
+ return false
+}
+
+func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf {
+ return &exportedIntf{
+ methods: methods,
+ includeSubtree: includeSubtree,
+ }
+}
+
+type exportedIntf struct {
+ methods map[string]Method
+
+ // Whether or not this export is for the entire subtree
+ includeSubtree bool
+}
+
+func (obj *exportedIntf) LookupMethod(name string) (Method, bool) {
+ out, exists := obj.methods[name]
+ return out, exists
+}
+
+func (obj *exportedIntf) isFallbackInterface() bool {
+ return obj.includeSubtree
+}
+
+// NewDefaultSignalHandler returns an instance of the default
+// signal handler. This is useful if you want to implement only
+// one of the two handlers but not both.
+func NewDefaultSignalHandler() *defaultSignalHandler {
+ return &defaultSignalHandler{}
+}
+
+func isDefaultSignalHandler(handler SignalHandler) bool {
+ _, ok := handler.(*defaultSignalHandler)
+ return ok
+}
+
+type defaultSignalHandler struct {
+ sync.RWMutex
+ closed bool
+ signals []chan<- *Signal
+}
+
+func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) {
+ go func() {
+ sh.RLock()
+ defer sh.RUnlock()
+ if sh.closed {
+ return
+ }
+ for _, ch := range sh.signals {
+ ch <- signal
+ }
+ }()
+}
+
+func (sh *defaultSignalHandler) Init() error {
+ sh.Lock()
+ sh.signals = make([]chan<- *Signal, 0)
+ sh.Unlock()
+ return nil
+}
+
+func (sh *defaultSignalHandler) Terminate() {
+ sh.Lock()
+ sh.closed = true
+ for _, ch := range sh.signals {
+ close(ch)
+ }
+ sh.signals = nil
+ sh.Unlock()
+}
+
+func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) {
+ sh.Lock()
+ defer sh.Unlock()
+ if sh.closed {
+ return
+ }
+ sh.signals = append(sh.signals, ch)
+}
+
+func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) {
+ sh.Lock()
+ defer sh.Unlock()
+ if sh.closed {
+ return
+ }
+ for i := len(sh.signals) - 1; i >= 0; i-- {
+ if ch == sh.signals[i] {
+ copy(sh.signals[i:], sh.signals[i+1:])
+ sh.signals[len(sh.signals)-1] = nil
+ sh.signals = sh.signals[:len(sh.signals)-1]
+ }
+ }
+}
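
DeliverSignal above pushes each signal onto every registered channel from a fresh goroutine, so receive channels should be buffered to avoid stalling that goroutine. A minimal sketch of consuming signals over a session-bus connection; SessionBus, BusObject, and Signal are exported elsewhere in this package, and the match rule targets the standard org.freedesktop.DBus interface:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        conn, err := dbus.SessionBus()
        if err != nil {
            panic(err)
        }
        // Subscribe to a standard bus signal; AddMatch is a real
        // org.freedesktop.DBus method.
        call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
            "type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")
        if call.Err != nil {
            panic(call.Err)
        }
        ch := make(chan *dbus.Signal, 10) // buffered: DeliverSignal sends without selecting
        conn.Signal(ch)
        for sig := range ch {
            fmt.Println(sig.Name, sig.Body)
        }
    }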
diff --git a/vendor/github.com/godbus/dbus/doc.go b/vendor/github.com/godbus/dbus/doc.go
new file mode 100644
index 000000000..895036a8c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/doc.go
@@ -0,0 +1,69 @@
+/*
+Package dbus implements bindings to the D-Bus message bus system.
+
+To use the message bus API, you first need to connect to a bus (usually the
+session or system bus). The acquired connection then can be used to call methods
+on remote objects and emit or receive signals. Using the Export method, you can
+arrange D-Bus method calls to be directly translated to method calls on a Go
+value.
+
+Conversion Rules
+
+For outgoing messages, Go types are automatically converted to the
+corresponding D-Bus types. The following types are directly encoded as their
+respective D-Bus equivalents:
+
+ Go type | D-Bus type
+ ------------+-----------
+ byte | BYTE
+ bool | BOOLEAN
+ int16 | INT16
+ uint16 | UINT16
+ int | INT32
+ uint | UINT32
+ int32 | INT32
+ uint32 | UINT32
+ int64 | INT64
+ uint64 | UINT64
+ float64 | DOUBLE
+ string | STRING
+ ObjectPath | OBJECT_PATH
+ Signature | SIGNATURE
+ Variant | VARIANT
+ interface{} | VARIANT
+ UnixFDIndex | UNIX_FD
+
+Slices and arrays encode as ARRAYs of their element type.
+
+Maps encode as DICTs, provided that their key type can be used as a key for
+a DICT.
+
+Structs other than Variant and Signature encode as a STRUCT containing their
+exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
+be skipped.
+
+Pointers encode as the value they're pointed to.
+
+Types convertible to one of the base types above will be mapped as the
+base type.
+
+Trying to encode any other type or a slice, map or struct containing an
+unsupported type will result in an InvalidTypeError.
+
+For incoming messages, the inverse of these rules is used, with the exception
+of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
+containing the struct fields in the correct order. The Store function can be
+used to convert such values to Go structs.
+
+Unix FD passing
+
+Handling Unix file descriptors deserves special mention. To use them, you should
+first check that they are supported on a connection by calling SupportsUnixFDs.
+If it returns true, all methods of Connection will translate messages containing
+UnixFDs to messages that are accompanied by the given file descriptors, with the
+UnixFD values being substituted by the correct indices. Similarly, the indices
+of incoming messages are automatically resolved. It shouldn't be necessary to use
+UnixFDIndex.
+
+*/
+package dbus
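
A short sketch of the conversion table in action, using SignatureOf from this package (defined in sig.go later in this change):

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        // Basic types map directly to their signature codes.
        fmt.Println(dbus.SignatureOf(int32(0), "hi", true)) // isb
        // Maps become DICTs, slices and arrays become ARRAYs.
        fmt.Println(dbus.SignatureOf(map[string]int32{})) // a{si}
        fmt.Println(dbus.SignatureOf([]uint64{}))         // at
        // Structs encode their exported fields in order.
        type pair struct {
            A string
            B float64
        }
        fmt.Println(dbus.SignatureOf(pair{})) // (sd)
    }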
diff --git a/vendor/github.com/godbus/dbus/encoder.go b/vendor/github.com/godbus/dbus/encoder.go
new file mode 100644
index 000000000..8bb717761
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/encoder.go
@@ -0,0 +1,210 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+// An encoder encodes values to the D-Bus wire format.
+type encoder struct {
+ out io.Writer
+ order binary.ByteOrder
+ pos int
+}
+
+// newEncoder returns a new encoder that writes to out in the given byte order.
+func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
+ return newEncoderAtOffset(out, 0, order)
+}
+
+// newEncoderAtOffset returns a new encoder that writes to out in the given
+// byte order. Specify the offset to initialize pos for proper alignment
+// computation.
+func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
+ enc := new(encoder)
+ enc.out = out
+ enc.order = order
+ enc.pos = offset
+ return enc
+}
+
+// Aligns the next output to be on a multiple of n. Panics on write errors.
+func (enc *encoder) align(n int) {
+ pad := enc.padding(0, n)
+ if pad > 0 {
+ empty := make([]byte, pad)
+ if _, err := enc.out.Write(empty); err != nil {
+ panic(err)
+ }
+ enc.pos += pad
+ }
+}
+
+// padding returns the number of bytes of padding required, given the current
+// position, an additional offset, and the requested alignment.
+func (enc *encoder) padding(offset, algn int) int {
+ abs := enc.pos + offset
+ if abs%algn != 0 {
+ newabs := (abs + algn - 1) & ^(algn - 1)
+ return newabs - abs
+ }
+ return 0
+}
+
+// binwrite calls binary.Write(enc.out, enc.order, v) and panics on write errors.
+func (enc *encoder) binwrite(v interface{}) {
+ if err := binary.Write(enc.out, enc.order, v); err != nil {
+ panic(err)
+ }
+}
+
+// Encode encodes the given values to the underlying writer. All written values
+// are aligned properly as required by the D-Bus spec.
+func (enc *encoder) Encode(vs ...interface{}) (err error) {
+ defer func() {
+ err, _ = recover().(error)
+ }()
+ for _, v := range vs {
+ enc.encode(reflect.ValueOf(v), 0)
+ }
+ return nil
+}
+
+// encode encodes the given value to the writer and panics on error. depth holds
+// the depth of the container nesting.
+func (enc *encoder) encode(v reflect.Value, depth int) {
+ enc.align(alignment(v.Type()))
+ switch v.Kind() {
+ case reflect.Uint8:
+ var b [1]byte
+ b[0] = byte(v.Uint())
+ if _, err := enc.out.Write(b[:]); err != nil {
+ panic(err)
+ }
+ enc.pos++
+ case reflect.Bool:
+ if v.Bool() {
+ enc.encode(reflect.ValueOf(uint32(1)), depth)
+ } else {
+ enc.encode(reflect.ValueOf(uint32(0)), depth)
+ }
+ case reflect.Int16:
+ enc.binwrite(int16(v.Int()))
+ enc.pos += 2
+ case reflect.Uint16:
+ enc.binwrite(uint16(v.Uint()))
+ enc.pos += 2
+ case reflect.Int, reflect.Int32:
+ enc.binwrite(int32(v.Int()))
+ enc.pos += 4
+ case reflect.Uint, reflect.Uint32:
+ enc.binwrite(uint32(v.Uint()))
+ enc.pos += 4
+ case reflect.Int64:
+ enc.binwrite(v.Int())
+ enc.pos += 8
+ case reflect.Uint64:
+ enc.binwrite(v.Uint())
+ enc.pos += 8
+ case reflect.Float64:
+ enc.binwrite(v.Float())
+ enc.pos += 8
+ case reflect.String:
+ enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
+ b := make([]byte, v.Len()+1)
+ copy(b, v.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case reflect.Ptr:
+ enc.encode(v.Elem(), depth)
+ case reflect.Slice, reflect.Array:
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus alignment for elements.
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem()))
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+
+ for i := 0; i < v.Len(); i++ {
+ bufenc.encode(v.Index(i), depth+1)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(alignment(v.Type().Elem()))
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ case reflect.Struct:
+ if depth >= 64 && v.Type() != signatureType {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ switch t := v.Type(); t {
+ case signatureType:
+ str := v.Field(0)
+ enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
+ b := make([]byte, str.Len()+1)
+ copy(b, str.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case variantType:
+ variant := v.Interface().(Variant)
+ enc.encode(reflect.ValueOf(variant.sig), depth+1)
+ enc.encode(reflect.ValueOf(variant.value), depth+1)
+ default:
+ for i := 0; i < v.Type().NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ enc.encode(v.Field(i), depth+1)
+ }
+ }
+ }
+ case reflect.Map:
+ // Maps are arrays of structures, so they actually increase the depth by
+ // 2.
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ keys := v.MapKeys()
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus 8-byte alignment
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, 8)
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+ for _, k := range keys {
+ bufenc.align(8)
+ bufenc.encode(k, depth+2)
+ bufenc.encode(v.MapIndex(k), depth+2)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(8)
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ case reflect.Interface:
+ enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth)
+ default:
+ panic(InvalidTypeError{v.Type()})
+ }
+}
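
The encoder writes a STRING as an aligned uint32 byte length, the raw bytes, and a terminating NUL. A standalone sketch of that wire layout; encodeString is a hypothetical helper, not the package's unexported encoder:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // encodeString lays out a D-Bus STRING the way encoder.encode does:
    // a uint32 byte length, the UTF-8 bytes, and a terminating NUL.
    func encodeString(buf *bytes.Buffer, order binary.ByteOrder, s string) {
        binary.Write(buf, order, uint32(len(s))) // cannot fail on a bytes.Buffer
        buf.WriteString(s)
        buf.WriteByte(0)
    }

    func main() {
        var buf bytes.Buffer
        encodeString(&buf, binary.LittleEndian, "foo")
        fmt.Printf("% x\n", buf.Bytes()) // 03 00 00 00 66 6f 6f 00
    }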
diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go
new file mode 100644
index 000000000..aae970881
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/export.go
@@ -0,0 +1,413 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ ErrMsgInvalidArg = Error{
+ "org.freedesktop.DBus.Error.InvalidArgs",
+ []interface{}{"Invalid type / number of args"},
+ }
+ ErrMsgNoObject = Error{
+ "org.freedesktop.DBus.Error.NoSuchObject",
+ []interface{}{"No such object"},
+ }
+ ErrMsgUnknownMethod = Error{
+ "org.freedesktop.DBus.Error.UnknownMethod",
+ []interface{}{"Unknown / invalid method"},
+ }
+ ErrMsgUnknownInterface = Error{
+ "org.freedesktop.DBus.Error.UnknownInterface",
+ []interface{}{"Object does not implement the interface"},
+ }
+)
+
+func MakeFailedError(err error) *Error {
+ return &Error{
+ "org.freedesktop.DBus.Error.Failed",
+ []interface{}{err.Error()},
+ }
+}
+
+// Sender is a type which can be used in exported methods to receive the message
+// sender.
+type Sender string
+
+func computeMethodName(name string, mapping map[string]string) string {
+ newname, ok := mapping[name]
+ if ok {
+ name = newname
+ }
+ return name
+}
+
+func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Value {
+ if in == nil {
+ return nil
+ }
+ methods := make(map[string]reflect.Value)
+ val := reflect.ValueOf(in)
+ typ := val.Type()
+ for i := 0; i < typ.NumMethod(); i++ {
+ methtype := typ.Method(i)
+ method := val.Method(i)
+ t := method.Type()
+ // only track valid methods: they must return *Error as the last
+ // return value and must be exported
+ if t.NumOut() == 0 ||
+ t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) ||
+ methtype.PkgPath != "" {
+ continue
+ }
+ // map names while building table
+ methods[computeMethodName(methtype.Name, mapping)] = method
+ }
+ return methods
+}
+
+func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) {
+ pointers := make([]interface{}, m.NumArguments())
+ decode := make([]interface{}, 0, len(body))
+
+ for i := 0; i < m.NumArguments(); i++ {
+ tp := reflect.TypeOf(m.ArgumentValue(i))
+ val := reflect.New(tp)
+ pointers[i] = val.Interface()
+ if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
+ val.Elem().SetString(sender)
+ } else if tp == reflect.TypeOf((*Message)(nil)).Elem() {
+ val.Elem().Set(reflect.ValueOf(*msg))
+ } else {
+ decode = append(decode, pointers[i])
+ }
+ }
+
+ if len(decode) != len(body) {
+ return nil, ErrMsgInvalidArg
+ }
+
+ if err := Store(body, decode...); err != nil {
+ return nil, ErrMsgInvalidArg
+ }
+
+ return pointers, nil
+}
+
+func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) {
+ if decoder, ok := m.(ArgumentDecoder); ok {
+ return decoder.DecodeArguments(conn, sender, msg, msg.Body)
+ }
+ return standardMethodArgumentDecode(m, sender, msg, msg.Body)
+}
+
+// handleCall handles the given method call (i.e. checks whether it's one of
+// the pre-implemented ones and searches for a corresponding handler if not).
+func (conn *Conn) handleCall(msg *Message) {
+ name := msg.Headers[FieldMember].value.(string)
+ path := msg.Headers[FieldPath].value.(ObjectPath)
+ ifaceName, _ := msg.Headers[FieldInterface].value.(string)
+ sender, hasSender := msg.Headers[FieldSender].value.(string)
+ serial := msg.serial
+ if ifaceName == "org.freedesktop.DBus.Peer" {
+ switch name {
+ case "Ping":
+ conn.sendReply(sender, serial)
+ case "GetMachineId":
+ conn.sendReply(sender, serial, conn.uuid)
+ default:
+ conn.sendError(ErrMsgUnknownMethod, sender, serial)
+ }
+ return
+ }
+ if len(name) == 0 {
+ conn.sendError(ErrMsgUnknownMethod, sender, serial)
+ // return here: without it, handling would continue with an empty
+ // member name after an error has already been sent
+ return
+ }
+
+ object, ok := conn.handler.LookupObject(path)
+ if !ok {
+ conn.sendError(ErrMsgNoObject, sender, serial)
+ return
+ }
+
+ iface, exists := object.LookupInterface(ifaceName)
+ if !exists {
+ conn.sendError(ErrMsgUnknownInterface, sender, serial)
+ return
+ }
+
+ m, exists := iface.LookupMethod(name)
+ if !exists {
+ conn.sendError(ErrMsgUnknownMethod, sender, serial)
+ return
+ }
+ args, err := conn.decodeArguments(m, sender, msg)
+ if err != nil {
+ conn.sendError(err, sender, serial)
+ return
+ }
+
+ ret, err := m.Call(args...)
+ if err != nil {
+ conn.sendError(err, sender, serial)
+ return
+ }
+
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ reply := new(Message)
+ reply.Type = TypeMethodReply
+ reply.serial = conn.getSerial()
+ reply.Headers = make(map[HeaderField]Variant)
+ if hasSender {
+ reply.Headers[FieldDestination] = msg.Headers[FieldSender]
+ }
+ reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
+ reply.Body = make([]interface{}, len(ret))
+ for i := 0; i < len(ret); i++ {
+ reply.Body[i] = ret[i]
+ }
+ reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- reply
+ }
+ conn.outLck.RUnlock()
+ }
+}
+
+// Emit emits the given signal on the message bus. The name parameter must be
+// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
+func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
+ if !path.IsValid() {
+ return errors.New("dbus: invalid object path")
+ }
+ i := strings.LastIndex(name, ".")
+ if i == -1 {
+ return errors.New("dbus: invalid method name")
+ }
+ iface := name[:i]
+ member := name[i+1:]
+ if !isValidMember(member) {
+ return errors.New("dbus: invalid method name")
+ }
+ if !isValidInterface(iface) {
+ return errors.New("dbus: invalid interface name")
+ }
+ msg := new(Message)
+ msg.Type = TypeSignal
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ msg.Headers[FieldMember] = MakeVariant(member)
+ msg.Headers[FieldPath] = MakeVariant(path)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.outLck.RLock()
+ defer conn.outLck.RUnlock()
+ if conn.closed {
+ return ErrClosed
+ }
+ conn.out <- msg
+ return nil
+}
+
+// Export registers the given value to be exported as an object on the
+// message bus.
+//
+// If a method call on the given path and interface is received, an exported
+// method with the same name is called with v as the receiver if the
+// parameters match and the last return value is of type *Error. If this
+// *Error is not nil, it is sent back to the caller as an error.
+// Otherwise, a method reply is sent with the other return values as its body.
+//
+// Any parameters with the special type Sender are set to the sender of the
+// dbus message when the method is called. Parameters of this type do not
+// contribute to the dbus signature of the method (i.e. the method is exposed
+// as if the parameters of type Sender were not there).
+//
+// Similarly, any parameters with the type Message are set to the raw message
+// received on the bus. Again, parameters of this type do not contribute to the
+// dbus signature of the method.
+//
+// Every method call is executed in a new goroutine, so the method may be called
+// in multiple goroutines at once.
+//
+// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
+// handled for every object.
+//
+// Passing nil as the first parameter will cause conn to cease handling calls on
+// the given combination of path and interface.
+//
+// Export returns an error if path is not a valid path name.
+func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportWithMap(v, nil, path, iface)
+}
+
+// ExportWithMap works exactly like Export but provides the ability to remap
+// method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.export(getMethods(v, mapping), path, iface, false)
+}
+
+// ExportSubtree works exactly like Export but registers the given value for
+// the entire subtree rooted at the given path, rather than that path alone.
+//
+// In order to make this useful, one parameter in each of the value's exported
+// methods should be a Message, in which case it will contain the raw message
+// (allowing one to get access to the path that caused the method to be called).
+//
+// Note that more specific export paths take precedence over less specific. For
+// example, a method call using the ObjectPath /foo/bar/baz will call a method
+// exported on /foo/bar before a method exported on /foo.
+func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportSubtreeWithMap(v, nil, path, iface)
+}
+
+// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
+// ability to remap method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.export(getMethods(v, mapping), path, iface, true)
+}
+
+// ExportMethodTable, like Export, registers the given methods as an object
+// on the message bus. Unlike Export, it uses a method table to define
+// the object instead of a native Go object.
+//
+// The method table is a map from method name to function closure
+// representing the method. This allows an object exported on the bus to not
+// necessarily be a native Go object. It can be useful for generating exposed
+// methods on the fly.
+//
+// Any non-function objects in the method table are ignored.
+func (conn *Conn) ExportMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
+ return conn.exportMethodTable(methods, path, iface, false)
+}
+
+// ExportSubtreeMethodTable works like ExportSubtree, but with the same caveats
+// as ExportMethodTable.
+func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
+ return conn.exportMethodTable(methods, path, iface, true)
+}
+
+func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error {
+ out := make(map[string]reflect.Value)
+ for name, method := range methods {
+ rval := reflect.ValueOf(method)
+ if rval.Kind() != reflect.Func {
+ continue
+ }
+ t := rval.Type()
+ // only track valid methods: they must return *Error as the last return value
+ if t.NumOut() == 0 ||
+ t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) {
+ continue
+ }
+ out[name] = rval
+ }
+ return conn.export(out, path, iface, includeSubtree)
+}
+
+func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error {
+ if h.PathExists(path) {
+ obj := h.objects[path]
+ obj.DeleteInterface(iface)
+ if len(obj.interfaces) == 0 {
+ h.DeleteObject(path)
+ }
+ }
+ return nil
+}
+
+// export is the worker function for all exports/registrations.
+func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error {
+ h, ok := conn.handler.(*defaultHandler)
+ if !ok {
+ return fmt.Errorf(
+ "dbus: export only allowed on the default handler; have %T",
+ conn.handler)
+ }
+
+ if !path.IsValid() {
+ return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
+ }
+
+ // Remove a previous export if the interface is nil
+ if methods == nil {
+ return conn.unexport(h, path, iface)
+ }
+
+ // If this is the first handler for this path, make a new map to hold all
+ // handlers for this path.
+ if !h.PathExists(path) {
+ h.AddObject(path, newExportedObject())
+ }
+
+ exportedMethods := make(map[string]Method)
+ for name, method := range methods {
+ exportedMethods[name] = exportedMethod{method}
+ }
+
+ // Finally, save this handler
+ obj := h.objects[path]
+ obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree))
+
+ return nil
+}
+
+// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response.
+func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return ReleaseNameReply(r), nil
+}
+
+// RequestName calls org.freedesktop.DBus.RequestName and awaits a response.
+func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return RequestNameReply(r), nil
+}
+
+// ReleaseNameReply is the reply to a ReleaseName call.
+type ReleaseNameReply uint32
+
+const (
+ ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
+ ReleaseNameReplyNonExistent
+ ReleaseNameReplyNotOwner
+)
+
+// RequestNameFlags represents the possible flags for a RequestName call.
+type RequestNameFlags uint32
+
+const (
+ NameFlagAllowReplacement RequestNameFlags = 1 << iota
+ NameFlagReplaceExisting
+ NameFlagDoNotQueue
+)
+
+// RequestNameReply is the reply to a RequestName call.
+type RequestNameReply uint32
+
+const (
+ RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
+ RequestNameReplyInQueue
+ RequestNameReplyExists
+ RequestNameReplyAlreadyOwner
+)
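
A minimal sketch of exporting an object under the rules above: the method's last return value must be *Error for getMethods to register it. SessionBus is defined elsewhere in this package; the com.example path, interface, and method names are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    type demo struct{}

    // Greet qualifies for export: it is an exported method whose last
    // return value is *dbus.Error, as getMethods requires.
    func (d demo) Greet(name string) (string, *dbus.Error) {
        return "hello " + name, nil
    }

    func main() {
        conn, err := dbus.SessionBus()
        if err != nil {
            panic(err)
        }
        if err := conn.Export(demo{}, "/com/example/Demo", "com.example.Demo"); err != nil {
            panic(err)
        }
        reply, err := conn.RequestName("com.example.Demo", dbus.NameFlagDoNotQueue)
        if err != nil || reply != dbus.RequestNameReplyPrimaryOwner {
            panic(fmt.Sprint("could not claim name: ", err))
        }
        select {} // serve forever
    }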
diff --git a/vendor/github.com/godbus/dbus/homedir.go b/vendor/github.com/godbus/dbus/homedir.go
new file mode 100644
index 000000000..0b745f931
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/homedir.go
@@ -0,0 +1,28 @@
+package dbus
+
+import (
+ "os"
+ "sync"
+)
+
+var (
+ homeDir string
+ homeDirLock sync.Mutex
+)
+
+func getHomeDir() string {
+ homeDirLock.Lock()
+ defer homeDirLock.Unlock()
+
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = os.Getenv("HOME")
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = lookupHomeDir()
+ return homeDir
+}
diff --git a/vendor/github.com/godbus/dbus/homedir_dynamic.go b/vendor/github.com/godbus/dbus/homedir_dynamic.go
new file mode 100644
index 000000000..2732081e7
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/homedir_dynamic.go
@@ -0,0 +1,15 @@
+// +build !static_build
+
+package dbus
+
+import (
+ "os/user"
+)
+
+func lookupHomeDir() string {
+ u, err := user.Current()
+ if err != nil {
+ return "/"
+ }
+ return u.HomeDir
+}
diff --git a/vendor/github.com/godbus/dbus/homedir_static.go b/vendor/github.com/godbus/dbus/homedir_static.go
new file mode 100644
index 000000000..b9d9cb552
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/homedir_static.go
@@ -0,0 +1,45 @@
+// +build static_build
+
+package dbus
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func lookupHomeDir() string {
+ myUid := os.Getuid()
+
+ f, err := os.Open("/etc/passwd")
+ if err != nil {
+ return "/"
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ break
+ }
+
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ parts := strings.Split(line, ":")
+
+ if len(parts) >= 6 {
+ uid, err := strconv.Atoi(parts[2])
+ if err == nil && uid == myUid {
+ return parts[5]
+ }
+ }
+ }
+
+ // Default to / if we can't get a better value
+ return "/"
+}
diff --git a/vendor/github.com/godbus/dbus/message.go b/vendor/github.com/godbus/dbus/message.go
new file mode 100644
index 000000000..6a925367e
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/message.go
@@ -0,0 +1,353 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+const protoVersion byte = 1
+
+// Flags represents the possible flags of a D-Bus message.
+type Flags byte
+
+const (
+ // FlagNoReplyExpected signals that the message is not expected to generate
+ // a reply. If this flag is set on outgoing messages, any possible reply
+ // will be discarded.
+ FlagNoReplyExpected Flags = 1 << iota
+ // FlagNoAutoStart signals that the message bus should not automatically
+ // start an application when handling this message.
+ FlagNoAutoStart
+ // FlagAllowInteractiveAuthorization may be set on a method call
+ // message to inform the receiving side that the caller is prepared
+ // to wait for interactive authorization, which might take a
+ // considerable time to complete. For instance, if this flag is set,
+ // it would be appropriate to query the user for passwords or
+ // confirmation via Polkit or a similar framework.
+ FlagAllowInteractiveAuthorization
+)
+
+// Type represents the possible types of a D-Bus message.
+type Type byte
+
+const (
+ TypeMethodCall Type = 1 + iota
+ TypeMethodReply
+ TypeError
+ TypeSignal
+ typeMax
+)
+
+func (t Type) String() string {
+ switch t {
+ case TypeMethodCall:
+ return "method call"
+ case TypeMethodReply:
+ return "reply"
+ case TypeError:
+ return "error"
+ case TypeSignal:
+ return "signal"
+ }
+ return "invalid"
+}
+
+// HeaderField represents the possible byte codes for the headers
+// of a D-Bus message.
+type HeaderField byte
+
+const (
+ FieldPath HeaderField = 1 + iota
+ FieldInterface
+ FieldMember
+ FieldErrorName
+ FieldReplySerial
+ FieldDestination
+ FieldSender
+ FieldSignature
+ FieldUnixFDs
+ fieldMax
+)
+
+// An InvalidMessageError describes the reason why a D-Bus message is regarded as
+// invalid.
+type InvalidMessageError string
+
+func (e InvalidMessageError) Error() string {
+ return "dbus: invalid message: " + string(e)
+}
+
+// fieldType are the types of the various header fields.
+var fieldTypes = [fieldMax]reflect.Type{
+ FieldPath: objectPathType,
+ FieldInterface: stringType,
+ FieldMember: stringType,
+ FieldErrorName: stringType,
+ FieldReplySerial: uint32Type,
+ FieldDestination: stringType,
+ FieldSender: stringType,
+ FieldSignature: signatureType,
+ FieldUnixFDs: uint32Type,
+}
+
+// requiredFields lists the header fields that are required by the different
+// message types.
+var requiredFields = [typeMax][]HeaderField{
+ TypeMethodCall: {FieldPath, FieldMember},
+ TypeMethodReply: {FieldReplySerial},
+ TypeError: {FieldErrorName, FieldReplySerial},
+ TypeSignal: {FieldPath, FieldInterface, FieldMember},
+}
+
+// Message represents a single D-Bus message.
+type Message struct {
+ Type
+ Flags
+ Headers map[HeaderField]Variant
+ Body []interface{}
+
+ serial uint32
+}
+
+type header struct {
+ Field byte
+ Variant
+}
+
+// DecodeMessage tries to decode a single message in the D-Bus wire format
+// from the given reader. The byte order is figured out from the first byte.
+// The possibly returned error can be an error of the underlying reader, an
+// InvalidMessageError or a FormatError.
+func DecodeMessage(rd io.Reader) (msg *Message, err error) {
+ var order binary.ByteOrder
+ var hlength, length uint32
+ var typ, flags, proto byte
+ var headers []header
+
+ b := make([]byte, 1)
+ _, err = rd.Read(b)
+ if err != nil {
+ return
+ }
+ switch b[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+
+ dec := newDecoder(rd, order)
+ dec.pos = 1
+
+ msg = new(Message)
+ vs, err := dec.Decode(Signature{"yyyuu"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
+ return nil, err
+ }
+ msg.Type = Type(typ)
+ msg.Flags = Flags(flags)
+
+ // get the header length separately because we need it later
+ b = make([]byte, 4)
+ _, err = io.ReadFull(rd, b)
+ if err != nil {
+ return nil, err
+ }
+ binary.Read(bytes.NewBuffer(b), order, &hlength)
+ if hlength+length+16 > 1<<27 {
+ return nil, InvalidMessageError("message is too long")
+ }
+ dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
+ dec.pos = 12
+ vs, err = dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &headers); err != nil {
+ return nil, err
+ }
+
+ msg.Headers = make(map[HeaderField]Variant)
+ for _, v := range headers {
+ msg.Headers[HeaderField(v.Field)] = v.Variant
+ }
+
+ dec.align(8)
+ body := make([]byte, int(length))
+ if length != 0 {
+ _, err := io.ReadFull(rd, body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err = msg.IsValid(); err != nil {
+ return nil, err
+ }
+ sig, _ := msg.Headers[FieldSignature].value.(Signature)
+ if sig.str != "" {
+ buf := bytes.NewBuffer(body)
+ dec = newDecoder(buf, order)
+ vs, err := dec.Decode(sig)
+ if err != nil {
+ return nil, err
+ }
+ msg.Body = vs
+ }
+
+ return
+}
+
+// EncodeTo encodes and sends a message to the given writer. The byte order must
+// be either binary.LittleEndian or binary.BigEndian. If the message is not
+// valid or an error occurs when writing, an error is returned.
+func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
+ if err := msg.IsValid(); err != nil {
+ return err
+ }
+ var vs [7]interface{}
+ switch order {
+ case binary.LittleEndian:
+ vs[0] = byte('l')
+ case binary.BigEndian:
+ vs[0] = byte('B')
+ default:
+ return errors.New("dbus: invalid byte order")
+ }
+ body := new(bytes.Buffer)
+ enc := newEncoder(body, order)
+ if len(msg.Body) != 0 {
+ enc.Encode(msg.Body...)
+ }
+ vs[1] = msg.Type
+ vs[2] = msg.Flags
+ vs[3] = protoVersion
+ vs[4] = uint32(len(body.Bytes()))
+ vs[5] = msg.serial
+ headers := make([]header, 0, len(msg.Headers))
+ for k, v := range msg.Headers {
+ headers = append(headers, header{byte(k), v})
+ }
+ vs[6] = headers
+ var buf bytes.Buffer
+ enc = newEncoder(&buf, order)
+ enc.Encode(vs[:]...)
+ enc.align(8)
+ body.WriteTo(&buf)
+ if buf.Len() > 1<<27 {
+ return InvalidMessageError("message is too long")
+ }
+ if _, err := buf.WriteTo(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// IsValid checks whether msg is a valid message and returns an
+// InvalidMessageError if it is not.
+func (msg *Message) IsValid() error {
+ if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 {
+ return InvalidMessageError("invalid flags")
+ }
+ if msg.Type == 0 || msg.Type >= typeMax {
+ return InvalidMessageError("invalid message type")
+ }
+ for k, v := range msg.Headers {
+ if k == 0 || k >= fieldMax {
+ return InvalidMessageError("invalid header")
+ }
+ if reflect.TypeOf(v.value) != fieldTypes[k] {
+ return InvalidMessageError("invalid type of header field")
+ }
+ }
+ for _, v := range requiredFields[msg.Type] {
+ if _, ok := msg.Headers[v]; !ok {
+ return InvalidMessageError("missing required header")
+ }
+ }
+ if path, ok := msg.Headers[FieldPath]; ok {
+ if !path.value.(ObjectPath).IsValid() {
+ return InvalidMessageError("invalid path name")
+ }
+ }
+ if iface, ok := msg.Headers[FieldInterface]; ok {
+ if !isValidInterface(iface.value.(string)) {
+ return InvalidMessageError("invalid interface name")
+ }
+ }
+ if member, ok := msg.Headers[FieldMember]; ok {
+ if !isValidMember(member.value.(string)) {
+ return InvalidMessageError("invalid member name")
+ }
+ }
+ if errname, ok := msg.Headers[FieldErrorName]; ok {
+ if !isValidInterface(errname.value.(string)) {
+ return InvalidMessageError("invalid error name")
+ }
+ }
+ if len(msg.Body) != 0 {
+ if _, ok := msg.Headers[FieldSignature]; !ok {
+ return InvalidMessageError("missing signature")
+ }
+ }
+ return nil
+}
+
+// Serial returns the message's serial number. The returned value is only valid
+// for messages received by eavesdropping.
+func (msg *Message) Serial() uint32 {
+ return msg.serial
+}
+
+// String returns a string representation of a message similar to the format of
+// dbus-monitor.
+func (msg *Message) String() string {
+ if err := msg.IsValid(); err != nil {
+ return "<invalid>"
+ }
+ s := msg.Type.String()
+ if v, ok := msg.Headers[FieldSender]; ok {
+ s += " from " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldDestination]; ok {
+ s += " to " + v.value.(string)
+ }
+ s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
+ if v, ok := msg.Headers[FieldReplySerial]; ok {
+ s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldUnixFDs]; ok {
+ s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldPath]; ok {
+ s += " path " + string(v.value.(ObjectPath))
+ }
+ if v, ok := msg.Headers[FieldInterface]; ok {
+ s += " interface " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldErrorName]; ok {
+ s += " error " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldMember]; ok {
+ s += " member " + v.value.(string)
+ }
+ if len(msg.Body) != 0 {
+ s += "\n"
+ }
+ for i, v := range msg.Body {
+ s += " " + MakeVariant(v).String()
+ if i != len(msg.Body)-1 {
+ s += "\n"
+ }
+ }
+ return s
+}
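
A round trip through the wire format above, building a minimal method call (the path and member are invented) and feeding EncodeTo's output straight back into DecodeMessage:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        msg := new(dbus.Message)
        msg.Type = dbus.TypeMethodCall
        // A method call is valid once FieldPath and FieldMember are set,
        // per requiredFields; an empty body needs no signature header.
        msg.Headers = map[dbus.HeaderField]dbus.Variant{
            dbus.FieldPath:   dbus.MakeVariant(dbus.ObjectPath("/com/example/Obj")),
            dbus.FieldMember: dbus.MakeVariant("Ping"),
        }
        var buf bytes.Buffer
        if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
            panic(err)
        }
        decoded, err := dbus.DecodeMessage(&buf)
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded) // method call serial 0 path /com/example/Obj member Ping
    }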
diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go
new file mode 100644
index 000000000..6d95583d7
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/object.go
@@ -0,0 +1,147 @@
+package dbus
+
+import (
+ "errors"
+ "strings"
+)
+
+// BusObject is the interface of a remote object on which methods can be
+// invoked.
+type BusObject interface {
+ Call(method string, flags Flags, args ...interface{}) *Call
+ Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
+ GetProperty(p string) (Variant, error)
+ Destination() string
+ Path() ObjectPath
+}
+
+// Object represents a remote object on which methods can be invoked.
+type Object struct {
+ conn *Conn
+ dest string
+ path ObjectPath
+}
+
+// Call calls a method with (*Object).Go and waits for its reply.
+func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
+ return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
+}
+
+// AddMatchSignal subscribes BusObject to signals from specified interface and
+// method (member).
+func (o *Object) AddMatchSignal(iface, member string) *Call {
+ return o.Call(
+ "org.freedesktop.DBus.AddMatch",
+ 0,
+ "type='signal',interface='"+iface+"',member='"+member+"'",
+ )
+}
+
+// Go calls a method with the given arguments asynchronously. It returns a
+// Call structure representing this method call. The passed channel will
+// return the same value once the call is done. If ch is nil, a new channel
+// will be allocated. Otherwise, ch has to be buffered or Go will panic.
+//
+// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
+// is returned with any error in Err and a closed channel in Done containing
+// the returned Call as its one entry.
+//
+// If the method parameter contains a dot ('.'), the part before the last dot
+// specifies the interface on which the method is called.
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ iface := ""
+ i := strings.LastIndex(method, ".")
+ if i != -1 {
+ iface = method[:i]
+ }
+ method = method[i+1:]
+ msg := new(Message)
+ msg.Type = TypeMethodCall
+ msg.serial = o.conn.getSerial()
+ msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldPath] = MakeVariant(o.path)
+ msg.Headers[FieldDestination] = MakeVariant(o.dest)
+ msg.Headers[FieldMember] = MakeVariant(method)
+ if iface != "" {
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ }
+ msg.Body = args
+ if len(args) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
+ }
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 10)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Object).Go")
+ }
+ call := &Call{
+ Destination: o.dest,
+ Path: o.path,
+ Method: method,
+ Args: args,
+ Done: ch,
+ }
+ o.conn.callsLck.Lock()
+ o.conn.calls[msg.serial] = call
+ o.conn.callsLck.Unlock()
+ o.conn.outLck.RLock()
+ if o.conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ o.conn.out <- msg
+ }
+ o.conn.outLck.RUnlock()
+ return call
+ }
+ o.conn.outLck.RLock()
+ defer o.conn.outLck.RUnlock()
+ done := make(chan *Call, 1)
+ call := &Call{
+ Err: nil,
+ Done: done,
+ }
+ defer func() {
+ call.Done <- call
+ close(done)
+ }()
+ if o.conn.closed {
+ call.Err = ErrClosed
+ return call
+ }
+ o.conn.out <- msg
+ return call
+}
+
+// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
+// object. The property name must be given in interface.member notation.
+func (o *Object) GetProperty(p string) (Variant, error) {
+ idx := strings.LastIndex(p, ".")
+ if idx == -1 || idx+1 == len(p) {
+ return Variant{}, errors.New("dbus: invalid property " + p)
+ }
+
+ iface := p[:idx]
+ prop := p[idx+1:]
+
+ result := Variant{}
+ err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
+
+ if err != nil {
+ return Variant{}, err
+ }
+
+ return result, nil
+}
+
+// Destination returns the destination that calls on (o *Object) are sent to.
+func (o *Object) Destination() string {
+ return o.dest
+}
+
+// Path returns the path that calls on (o *Object) are sent to.
+func (o *Object) Path() ObjectPath {
+ return o.path
+}
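
A client-side sketch of the call path above: the part of the method name before the last dot selects the interface, and Store on the returned Call (defined elsewhere in this package) decodes the reply body. ListNames is a standard org.freedesktop.DBus method:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        conn, err := dbus.SessionBus()
        if err != nil {
            panic(err)
        }
        var names []string
        err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(names), "names on the session bus")
    }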
diff --git a/vendor/github.com/godbus/dbus/server_interfaces.go b/vendor/github.com/godbus/dbus/server_interfaces.go
new file mode 100644
index 000000000..091948aef
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/server_interfaces.go
@@ -0,0 +1,89 @@
+package dbus
+
+// Terminator allows a handler to implement a shutdown mechanism that
+// is called when the connection terminates.
+type Terminator interface {
+ Terminate()
+}
+
+// Handler is the representation of a D-Bus Application.
+//
+// The Handler must have a way to lookup objects given
+// an ObjectPath. The returned object must implement the
+// ServerObject interface.
+type Handler interface {
+ LookupObject(path ObjectPath) (ServerObject, bool)
+}
+
+// ServerObject is the representation of a D-Bus Object.
+//
+// Objects are registered at a path for a given Handler.
+// The Objects implement D-Bus interfaces. The semantics
+// of interface lookup are up to the implementation of
+// the ServerObject. Per the D-Bus specification, the
+// implementation may choose whether to treat the empty
+// string as a valid interface representing all methods.
+type ServerObject interface {
+ LookupInterface(name string) (Interface, bool)
+}
+
+// An Interface is the representation of a D-Bus Interface.
+//
+// Interfaces are a grouping of methods implemented by the Objects.
+// Interfaces are responsible for routing method calls.
+type Interface interface {
+ LookupMethod(name string) (Method, bool)
+}
+
+// A Method represents the exposed methods on D-Bus.
+type Method interface {
+ // Call requires that all arguments are decoded before being passed to it.
+ Call(args ...interface{}) ([]interface{}, error)
+ NumArguments() int
+ NumReturns() int
+ // ArgumentValue returns a representative value for the argument at the
+ // given position; it should be of the proper type. reflect.Zero would be
+ // a good mechanism to use for this Value.
+ ArgumentValue(position int) interface{}
+ // ReturnValue returns a representative value for the return value at the
+ // given position; it should be of the proper type. reflect.Zero would be
+ // a good mechanism to use for this Value.
+ ReturnValue(position int) interface{}
+}
+
+// An ArgumentDecoder can decode arguments using a non-standard mechanism.
+//
+// If a method implements this interface then the non-standard
+// decoder will be used.
+//
+// Method arguments must be decoded from the message.
+// The mechanism for doing this will vary based on the
+// implementation of the method. A normal approach is provided
+// as part of this library, but may be replaced with
+// any other decoding scheme.
+type ArgumentDecoder interface {
+ // To decode the arguments of a method, the sender and message are
+ // provided in case the semantics of the implementer provide access
+ // to these as part of the method invocation.
+ DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
+}
+
+// A SignalHandler is responsible for delivering a signal.
+//
+// Signal delivery may be changed from the default channel
+// based approach by Handlers implementing the SignalHandler
+// interface.
+type SignalHandler interface {
+ DeliverSignal(iface, name string, signal *Signal)
+}
+
+// A DBusError is used to convert a generic object to a D-Bus error.
+//
+// Any custom error mechanism may implement this interface to provide
+// a custom encoding of the error on D-Bus. By default if a normal
+// error is returned, it will be encoded as the generic
+// "org.freedesktop.DBus.Error.Failed" error. By implementing this
+// interface as well a custom encoding may be provided.
+type DBusError interface {
+ DBusError() (string, []interface{})
+}
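
A sketch of implementing the Method interface by hand, wrapping an arbitrary function with reflection much as the unexported exportedMethod in default_handler.go does. fnMethod is a hypothetical name, not part of the package:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/godbus/dbus"
    )

    // fnMethod adapts an arbitrary Go function to the dbus.Method interface.
    type fnMethod struct{ fn reflect.Value }

    func (m fnMethod) Call(args ...interface{}) ([]interface{}, error) {
        in := make([]reflect.Value, len(args))
        for i, a := range args {
            in[i] = reflect.ValueOf(a).Elem() // Call receives pointers to decoded args
        }
        out := m.fn.Call(in)
        res := make([]interface{}, len(out))
        for i, v := range out {
            res[i] = v.Interface()
        }
        return res, nil
    }
    func (m fnMethod) NumArguments() int { return m.fn.Type().NumIn() }
    func (m fnMethod) NumReturns() int   { return m.fn.Type().NumOut() }
    func (m fnMethod) ArgumentValue(position int) interface{} {
        return reflect.Zero(m.fn.Type().In(position)).Interface()
    }
    func (m fnMethod) ReturnValue(position int) interface{} {
        return reflect.Zero(m.fn.Type().Out(position)).Interface()
    }

    var _ dbus.Method = fnMethod{} // compile-time interface check

    func main() {
        m := fnMethod{reflect.ValueOf(func(a, b int32) int32 { return a + b })}
        fmt.Println(m.NumArguments(), m.NumReturns()) // 2 1
        fmt.Printf("%T\n", m.ArgumentValue(0))        // int32
    }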
diff --git a/vendor/github.com/godbus/dbus/sig.go b/vendor/github.com/godbus/dbus/sig.go
new file mode 100644
index 000000000..c1b809202
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/sig.go
@@ -0,0 +1,259 @@
+package dbus
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var sigToType = map[byte]reflect.Type{
+ 'y': byteType,
+ 'b': boolType,
+ 'n': int16Type,
+ 'q': uint16Type,
+ 'i': int32Type,
+ 'u': uint32Type,
+ 'x': int64Type,
+ 't': uint64Type,
+ 'd': float64Type,
+ 's': stringType,
+ 'g': signatureType,
+ 'o': objectPathType,
+ 'v': variantType,
+ 'h': unixFDIndexType,
+}
+
+// Signature represents a correct type signature as specified by the D-Bus
+// specification. The zero value represents the empty signature, "".
+type Signature struct {
+ str string
+}
+
+// SignatureOf returns the concatenation of all the signatures of the given
+// values. It panics if one of them is not representable in D-Bus.
+func SignatureOf(vs ...interface{}) Signature {
+ var s string
+ for _, v := range vs {
+ s += getSignature(reflect.TypeOf(v))
+ }
+ return Signature{s}
+}
+
+// SignatureOfType returns the signature of the given type. It panics if the
+// type is not representable in D-Bus.
+func SignatureOfType(t reflect.Type) Signature {
+ return Signature{getSignature(t)}
+}
+
+// getSignature returns the signature of the given type and panics on unknown types.
+func getSignature(t reflect.Type) string {
+ // handle simple types first
+ switch t.Kind() {
+ case reflect.Uint8:
+ return "y"
+ case reflect.Bool:
+ return "b"
+ case reflect.Int16:
+ return "n"
+ case reflect.Uint16:
+ return "q"
+ case reflect.Int, reflect.Int32:
+ if t == unixFDType {
+ return "h"
+ }
+ return "i"
+ case reflect.Uint, reflect.Uint32:
+ if t == unixFDIndexType {
+ return "h"
+ }
+ return "u"
+ case reflect.Int64:
+ return "x"
+ case reflect.Uint64:
+ return "t"
+ case reflect.Float64:
+ return "d"
+ case reflect.Ptr:
+ return getSignature(t.Elem())
+ case reflect.String:
+ if t == objectPathType {
+ return "o"
+ }
+ return "s"
+ case reflect.Struct:
+ if t == variantType {
+ return "v"
+ } else if t == signatureType {
+ return "g"
+ }
+ var s string
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ s += getSignature(t.Field(i).Type)
+ }
+ }
+ return "(" + s + ")"
+ case reflect.Array, reflect.Slice:
+ return "a" + getSignature(t.Elem())
+ case reflect.Map:
+ if !isKeyType(t.Key()) {
+ panic(InvalidTypeError{t})
+ }
+ return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
+ case reflect.Interface:
+ return "v"
+ }
+ panic(InvalidTypeError{t})
+}
+
+// ParseSignature returns the signature represented by this string, or a
+// SignatureError if the string is not a valid signature.
+func ParseSignature(s string) (sig Signature, err error) {
+ if len(s) == 0 {
+ return
+ }
+ if len(s) > 255 {
+ return Signature{""}, SignatureError{s, "too long"}
+ }
+ sig.str = s
+ for err == nil && len(s) != 0 {
+ err, s = validSingle(s, 0)
+ }
+ if err != nil {
+ sig = Signature{""}
+ }
+
+ return
+}
+
+// ParseSignatureMust behaves like ParseSignature, except that it panics if s
+// is not valid.
+func ParseSignatureMust(s string) Signature {
+ sig, err := ParseSignature(s)
+ if err != nil {
+ panic(err)
+ }
+ return sig
+}
+
+// Empty returns whether the signature is the empty signature.
+func (s Signature) Empty() bool {
+ return s.str == ""
+}
+
+// Single returns whether the signature represents a single, complete type.
+func (s Signature) Single() bool {
+ err, r := validSingle(s.str, 0)
+ // a single complete type parses without error and leaves no remainder
+ return err == nil && r == ""
+}
+
+// String returns the signature's string representation.
+func (s Signature) String() string {
+ return s.str
+}
+
+// A SignatureError indicates that a signature passed to a function or received
+// on a connection is not a valid signature.
+type SignatureError struct {
+ Sig string
+ Reason string
+}
+
+func (e SignatureError) Error() string {
+ return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
+}
+
+// Try to read a single type from this string. If it was successful, err is nil
+// and rem is the remaining unparsed part. Otherwise, err is a non-nil
+// SignatureError and rem is "". depth is the current recursion depth which may
+// not be greater than 64 and should be given as 0 on the first call.
+func validSingle(s string, depth int) (err error, rem string) {
+ if s == "" {
+ return SignatureError{Sig: s, Reason: "empty signature"}, ""
+ }
+ if depth > 64 {
+ return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
+ }
+ switch s[0] {
+ case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
+ return nil, s[1:]
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ i := findMatching(s[1:], '{', '}')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
+ }
+ i++
+ rem = s[i+1:]
+ s = s[2:i]
+ if err, _ = validSingle(s[:1], depth+1); err != nil {
+ return err, ""
+ }
+ err, nr := validSingle(s[1:], depth+1)
+ if err != nil {
+ return err, ""
+ }
+ if nr != "" {
+ return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
+ }
+ return nil, rem
+ }
+ return validSingle(s[1:], depth+1)
+ case '(':
+ i := findMatching(s, '(', ')')
+ if i == -1 {
+			return SignatureError{Sig: s, Reason: "unmatched '('"}, ""
+ }
+ rem = s[i+1:]
+ s = s[1:i]
+ for err == nil && s != "" {
+ err, s = validSingle(s, depth+1)
+ }
+ if err != nil {
+ rem = ""
+ }
+ return
+ }
+ return SignatureError{Sig: s, Reason: "invalid type character"}, ""
+}
+
+func findMatching(s string, left, right rune) int {
+ n := 0
+ for i, v := range s {
+ if v == left {
+ n++
+ } else if v == right {
+ n--
+ }
+ if n == 0 {
+ return i
+ }
+ }
+ return -1
+}
+
+// typeFor returns the type of the given signature. It ignores any leftover
+// characters and panics if s doesn't start with a valid type signature.
+func typeFor(s string) (t reflect.Type) {
+ err, _ := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ if t, ok := sigToType[s[0]]; ok {
+ return t
+ }
+ switch s[0] {
+ case 'a':
+ if s[1] == '{' {
+ i := strings.LastIndex(s, "}")
+ t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
+ } else {
+ t = reflect.SliceOf(typeFor(s[1:]))
+ }
+ case '(':
+ t = interfacesType
+ }
+ return
+}
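
The signature code above maps Go types to D-Bus signature strings in both directions. A minimal sketch of the mapping against the vendored API (expected output in comments):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/godbus/dbus"
)

type pair struct {
	Name string // -> "s"
	ID   uint32 // -> "u"
}

func main() {
	// Exported struct fields concatenate inside parentheses.
	fmt.Println(dbus.SignatureOfType(reflect.TypeOf(pair{}))) // (su)

	// Maps become dict entries; slices and arrays get an "a" prefix.
	fmt.Println(dbus.SignatureOfType(reflect.TypeOf(map[string]int32{}))) // a{si}

	// ParseSignature reports malformed input instead of panicking.
	if _, err := dbus.ParseSignature("a{s"); err != nil {
		fmt.Println(err) // dbus: invalid signature: "a{s" (unmatched '{')
	}
}
```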
diff --git a/vendor/github.com/godbus/dbus/transport_darwin.go b/vendor/github.com/godbus/dbus/transport_darwin.go
new file mode 100644
index 000000000..1bba0d6bf
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_darwin.go
@@ -0,0 +1,6 @@
+package dbus
+
+func (t *unixTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go
new file mode 100644
index 000000000..3fad859a6
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_generic.go
@@ -0,0 +1,50 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+ "unsafe"
+)
+
+var nativeEndian binary.ByteOrder
+
+func detectEndianness() binary.ByteOrder {
+ var x uint32 = 0x01020304
+ if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+ return binary.BigEndian
+ }
+ return binary.LittleEndian
+}
+
+func init() {
+ nativeEndian = detectEndianness()
+}
+
+type genericTransport struct {
+ io.ReadWriteCloser
+}
+
+func (t genericTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
+
+func (t genericTransport) SupportsUnixFDs() bool {
+ return false
+}
+
+func (t genericTransport) EnableUnixFDs() {}
+
+func (t genericTransport) ReadMessage() (*Message, error) {
+ return DecodeMessage(t)
+}
+
+func (t genericTransport) SendMessage(msg *Message) error {
+ for _, v := range msg.Body {
+ if _, ok := v.(UnixFD); ok {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ }
+ return msg.EncodeTo(t, nativeEndian)
+}
diff --git a/vendor/github.com/godbus/dbus/transport_tcp.go b/vendor/github.com/godbus/dbus/transport_tcp.go
new file mode 100644
index 000000000..dd1c8e59c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_tcp.go
@@ -0,0 +1,43 @@
+//+build !windows
+
+package dbus
+
+import (
+ "errors"
+ "net"
+)
+
+func init() {
+ transports["tcp"] = newTcpTransport
+}
+
+func tcpFamily(keys string) (string, error) {
+ switch getKey(keys, "family") {
+ case "":
+ return "tcp", nil
+ case "ipv4":
+ return "tcp4", nil
+ case "ipv6":
+ return "tcp6", nil
+ default:
+ return "", errors.New("dbus: invalid tcp family (must be ipv4 or ipv6)")
+ }
+}
+
+func newTcpTransport(keys string) (transport, error) {
+ host := getKey(keys, "host")
+ port := getKey(keys, "port")
+ if host == "" || port == "" {
+ return nil, errors.New("dbus: unsupported address (must set host and port)")
+ }
+
+ protocol, err := tcpFamily(keys)
+ if err != nil {
+ return nil, err
+ }
+ socket, err := net.Dial(protocol, net.JoinHostPort(host, port))
+ if err != nil {
+ return nil, err
+ }
+ return NewConn(socket)
+}
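
newTcpTransport above consumes the key=value pairs of a D-Bus server address, so a TCP address has the form `tcp:host=...,port=...[,family=ipv4|ipv6]`. A hedged sketch of connecting through it, assuming the package's Dial entry point in conn.go (not shown in this diff) dispatches on the transports map:

```go
package main

import (
	"log"

	"github.com/godbus/dbus"
)

func main() {
	// family is optional; tcpFamily accepts only "", "ipv4", or "ipv6".
	conn, err := dbus.Dial("tcp:host=localhost,port=4242,family=ipv4")
	if err != nil {
		log.Fatal(err) // connection refused, invalid family, etc.
	}
	defer conn.Close()
	// A real client would continue with authentication and the Hello call
	// (see conn.go and auth.go in this package).
}
```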
diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go
new file mode 100644
index 000000000..e56d5ca90
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unix.go
@@ -0,0 +1,196 @@
+//+build !windows,!solaris
+
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+type oobReader struct {
+ conn *net.UnixConn
+ oob []byte
+ buf [4096]byte
+}
+
+func (o *oobReader) Read(b []byte) (n int, err error) {
+ n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
+ if err != nil {
+ return n, err
+ }
+ if flags&syscall.MSG_CTRUNC != 0 {
+ return n, errors.New("dbus: control data truncated (too many fds received)")
+ }
+ o.oob = append(o.oob, o.buf[:oobn]...)
+ return n, nil
+}
+
+type unixTransport struct {
+ *net.UnixConn
+ hasUnixFDs bool
+}
+
+func newUnixTransport(keys string) (transport, error) {
+ var err error
+
+ t := new(unixTransport)
+ abstract := getKey(keys, "abstract")
+ path := getKey(keys, "path")
+ switch {
+ case abstract == "" && path == "":
+ return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
+ case abstract != "" && path == "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ case abstract == "" && path != "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ default:
+ return nil, errors.New("dbus: invalid address (both path and abstract set)")
+ }
+}
+
+func init() {
+ transports["unix"] = newUnixTransport
+}
+
+func (t *unixTransport) EnableUnixFDs() {
+ t.hasUnixFDs = true
+}
+
+func (t *unixTransport) ReadMessage() (*Message, error) {
+ var (
+ blen, hlen uint32
+ csheader [16]byte
+ headers []header
+ order binary.ByteOrder
+ unixfds uint32
+ )
+ // To be sure that all bytes of out-of-band data are read, we use a special
+	// reader that uses ReadMsgUnix on the underlying connection instead of Read
+ // and gathers the out-of-band data in a buffer.
+ rd := &oobReader{conn: t.UnixConn}
+ // read the first 16 bytes (the part of the header that has a constant size),
+ // from which we can figure out the length of the rest of the message
+ if _, err := io.ReadFull(rd, csheader[:]); err != nil {
+ return nil, err
+ }
+ switch csheader[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+ // csheader[4:8] -> length of message body, csheader[12:16] -> length of
+ // header fields (without alignment)
+ binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
+ binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
+ if hlen%8 != 0 {
+ hlen += 8 - (hlen % 8)
+ }
+
+ // decode headers and look for unix fds
+ headerdata := make([]byte, hlen+4)
+ copy(headerdata, csheader[12:])
+	if _, err := io.ReadFull(rd, headerdata[4:]); err != nil {
+ return nil, err
+ }
+ dec := newDecoder(bytes.NewBuffer(headerdata), order)
+ dec.pos = 12
+ vs, err := dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ Store(vs, &headers)
+ for _, v := range headers {
+ if v.Field == byte(FieldUnixFDs) {
+ unixfds, _ = v.Variant.value.(uint32)
+ }
+ }
+ all := make([]byte, 16+hlen+blen)
+ copy(all, csheader[:])
+ copy(all[16:], headerdata[4:])
+ if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
+ return nil, err
+ }
+ if unixfds != 0 {
+ if !t.hasUnixFDs {
+ return nil, errors.New("dbus: got unix fds on unsupported transport")
+ }
+ // read the fds from the OOB data
+ scms, err := syscall.ParseSocketControlMessage(rd.oob)
+ if err != nil {
+ return nil, err
+ }
+ if len(scms) != 1 {
+ return nil, errors.New("dbus: received more than one socket control message")
+ }
+ fds, err := syscall.ParseUnixRights(&scms[0])
+ if err != nil {
+ return nil, err
+ }
+ msg, err := DecodeMessage(bytes.NewBuffer(all))
+ if err != nil {
+ return nil, err
+ }
+ // substitute the values in the message body (which are indices for the
+ // array receiver via OOB) with the actual values
+ for i, v := range msg.Body {
+ if j, ok := v.(UnixFDIndex); ok {
+ if uint32(j) >= unixfds {
+ return nil, InvalidMessageError("invalid index for unix fd")
+ }
+ msg.Body[i] = UnixFD(fds[j])
+ }
+ }
+ return msg, nil
+ }
+ return DecodeMessage(bytes.NewBuffer(all))
+}
+
+func (t *unixTransport) SendMessage(msg *Message) error {
+ fds := make([]int, 0)
+ for i, v := range msg.Body {
+ if fd, ok := v.(UnixFD); ok {
+ msg.Body[i] = UnixFDIndex(len(fds))
+ fds = append(fds, int(fd))
+ }
+ }
+ if len(fds) != 0 {
+ if !t.hasUnixFDs {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
+ oob := syscall.UnixRights(fds...)
+ buf := new(bytes.Buffer)
+ msg.EncodeTo(buf, nativeEndian)
+ n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
+ if err != nil {
+ return err
+ }
+ if n != buf.Len() || oobn != len(oob) {
+ return io.ErrShortWrite
+ }
+ } else {
+ if err := msg.EncodeTo(t, nativeEndian); err != nil {
+			return err
+ }
+ }
+ return nil
+}
+
+func (t *unixTransport) SupportsUnixFDs() bool {
+ return true
+}
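
SendMessage above captures the fd-passing contract: UnixFD values in the body are rewritten to UnixFDIndex placeholders while the real descriptors travel as SCM_RIGHTS ancillary data, and ReadMessage performs the inverse substitution. A minimal sketch of building such a body (the surrounding message header setup is elided):

```go
package main

import (
	"fmt"
	"os"

	"github.com/godbus/dbus"
)

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// A UnixFD in a body is only legal once fd passing has been negotiated
	// and EnableUnixFDs has been called on the transport.
	body := []interface{}{dbus.UnixFD(f.Fd())}
	fmt.Println(dbus.SignatureOf(body...)) // "h", the D-Bus file-descriptor type
}
```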
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go
new file mode 100644
index 000000000..a8cd39395
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go
@@ -0,0 +1,95 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for DragonFly BSD
+
+package dbus
+
+/*
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// http://golang.org/src/pkg/syscall/types_dragonfly.go
+// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
+const (
+ SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+ // From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+ //salign := sizeofPtr
+ // NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
+ // still require 32-bit aligned access to network subsystem.
+ //if darwin64Bit || dragonfly64Bit {
+ // salign = 4
+ //}
+ salign := 4
+ return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte {
+ b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+ h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = syscall.SOL_SOCKET
+ h.Type = syscall.SCM_CREDS
+ h.SetLen(syscall.CmsgLen(SizeofUcred))
+ *((*Ucred)(cmsgData(h))) = *ucred
+ return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
+ if m.Header.Level != syscall.SOL_SOCKET {
+ return nil, syscall.EINVAL
+ }
+ if m.Header.Type != syscall.SCM_CREDS {
+ return nil, syscall.EINVAL
+ }
+ ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
+ return &ucred, nil
+}
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
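
The masking expression in cmsgAlignOf rounds a length up to the next multiple of salign; with salign = 4 it adds 3 and clears the low two bits. A standalone check of the arithmetic:

```go
package main

import "fmt"

func main() {
	const salign = 4
	for _, salen := range []int{1, 4, 5, 8, 9} {
		// same expression as cmsgAlignOf above
		fmt.Println(salen, "->", (salen+salign-1) & ^(salign-1))
	}
	// prints: 1 -> 4, 4 -> 4, 5 -> 8, 8 -> 8, 9 -> 12
}
```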
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go
new file mode 100644
index 000000000..0fc5b9273
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go
@@ -0,0 +1,91 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for FreeBSD
+
+package dbus
+
+/*
+const int sizeofPtr = sizeof(void*);
+#define _WANT_UCRED
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// https://golang.org/src/syscall/ztypes_freebsd_amd64.go
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// https://golang.org/src/syscall/types_freebsd.go
+// https://github.com/freebsd/freebsd/blob/master/sys/sys/ucred.h
+const (
+ SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+	salign := int(C.sizeofPtr)
+
+ return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte {
+ b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+ h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = syscall.SOL_SOCKET
+ h.Type = syscall.SCM_CREDS
+ h.SetLen(syscall.CmsgLen(SizeofUcred))
+ *((*Ucred)(cmsgData(h))) = *ucred
+ return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
+ if m.Header.Level != syscall.SOL_SOCKET {
+ return nil, syscall.EINVAL
+ }
+ if m.Header.Type != syscall.SCM_CREDS {
+ return nil, syscall.EINVAL
+ }
+ ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
+ return &ucred, nil
+}
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_linux.go b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go
new file mode 100644
index 000000000..d9dfdf698
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go
@@ -0,0 +1,25 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+package dbus
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := syscall.UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go
new file mode 100644
index 000000000..af7bafdf9
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go
@@ -0,0 +1,14 @@
+package dbus
+
+import "io"
+
+func (t *unixTransport) SendNullByte() error {
+ n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil)
+ if err != nil {
+ return err
+ }
+ if n != 1 {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/variant.go b/vendor/github.com/godbus/dbus/variant.go
new file mode 100644
index 000000000..0ca123b01
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/variant.go
@@ -0,0 +1,144 @@
+package dbus
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Variant represents the D-Bus variant type.
+type Variant struct {
+ sig Signature
+ value interface{}
+}
+
+// MakeVariant converts the given value to a Variant. It panics if v cannot be
+// represented as a D-Bus type.
+func MakeVariant(v interface{}) Variant {
+ return MakeVariantWithSignature(v, SignatureOf(v))
+}
+
+// MakeVariantWithSignature converts the given value to a Variant.
+func MakeVariantWithSignature(v interface{}, s Signature) Variant {
+ return Variant{s, v}
+}
+
+// ParseVariant parses the given string as a variant as described at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
+// empty, it is taken to be the expected signature for the variant.
+func ParseVariant(s string, sig Signature) (Variant, error) {
+ tokens := varLex(s)
+ p := &varParser{tokens: tokens}
+ n, err := varMakeNode(p)
+ if err != nil {
+ return Variant{}, err
+ }
+ if sig.str == "" {
+ sig, err = varInfer(n)
+ if err != nil {
+ return Variant{}, err
+ }
+ }
+ v, err := n.Value(sig)
+ if err != nil {
+ return Variant{}, err
+ }
+ return MakeVariant(v), nil
+}
+
+// format returns a formatted version of v and whether this string can be parsed
+// unambiguously.
+func (v Variant) format() (string, bool) {
+ switch v.sig.str[0] {
+ case 'b', 'i':
+ return fmt.Sprint(v.value), true
+ case 'n', 'q', 'u', 'x', 't', 'd', 'h':
+ return fmt.Sprint(v.value), false
+ case 's':
+ return strconv.Quote(v.value.(string)), true
+ case 'o':
+ return strconv.Quote(string(v.value.(ObjectPath))), false
+ case 'g':
+ return strconv.Quote(v.value.(Signature).str), false
+ case 'v':
+ s, unamb := v.value.(Variant).format()
+ if !unamb {
+ return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
+ }
+ return "<" + s + ">", true
+ case 'y':
+ return fmt.Sprintf("%#x", v.value.(byte)), false
+ }
+ rv := reflect.ValueOf(v.value)
+ switch rv.Kind() {
+ case reflect.Slice:
+ if rv.Len() == 0 {
+ return "[]", false
+ }
+ unamb := true
+ buf := bytes.NewBuffer([]byte("["))
+ for i := 0; i < rv.Len(); i++ {
+ // TODO: slooow
+ s, b := MakeVariant(rv.Index(i).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ if i != rv.Len()-1 {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String(), unamb
+ case reflect.Map:
+ if rv.Len() == 0 {
+ return "{}", false
+ }
+ unamb := true
+ var buf bytes.Buffer
+ kvs := make([]string, rv.Len())
+ for i, k := range rv.MapKeys() {
+ s, b := MakeVariant(k.Interface()).format()
+ unamb = unamb && b
+ buf.Reset()
+ buf.WriteString(s)
+ buf.WriteString(": ")
+ s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ kvs[i] = buf.String()
+ }
+ buf.Reset()
+ buf.WriteByte('{')
+ sort.Strings(kvs)
+ for i, kv := range kvs {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(kv)
+ }
+ buf.WriteByte('}')
+ return buf.String(), unamb
+ }
+ return `"INVALID"`, true
+}
+
+// Signature returns the D-Bus signature of the underlying value of v.
+func (v Variant) Signature() Signature {
+ return v.sig
+}
+
+// String returns the string representation of the underlying value of v as
+// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
+func (v Variant) String() string {
+ s, unamb := v.format()
+ if !unamb {
+ return "@" + v.sig.str + " " + s
+ }
+ return s
+}
+
+// Value returns the underlying value of v.
+func (v Variant) Value() interface{} {
+ return v.value
+}
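
Variant.String round-trips values through the GVariant text format, adding an explicit @type prefix only when format reports the plain rendering as ambiguous. A sketch of both cases:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// int32 renders unambiguously ("i" is the default numeric type),
	// so no prefix is added.
	fmt.Println(dbus.MakeVariant(int32(5))) // 5

	// uint32 shares its rendering with other integer types, so String
	// prepends the signature to keep the text parseable.
	fmt.Println(dbus.MakeVariant(uint32(5))) // @u 5
}
```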
diff --git a/vendor/github.com/godbus/dbus/variant_lexer.go b/vendor/github.com/godbus/dbus/variant_lexer.go
new file mode 100644
index 000000000..332007d6f
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/variant_lexer.go
@@ -0,0 +1,284 @@
+package dbus
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Heavily inspired by the lexer from text/template.
+
+type varToken struct {
+ typ varTokenType
+ val string
+}
+
+type varTokenType byte
+
+const (
+ tokEOF varTokenType = iota
+ tokError
+ tokNumber
+ tokString
+ tokBool
+ tokArrayStart
+ tokArrayEnd
+ tokDictStart
+ tokDictEnd
+ tokVariantStart
+ tokVariantEnd
+ tokComma
+ tokColon
+ tokType
+ tokByteString
+)
+
+type varLexer struct {
+ input string
+ start int
+ pos int
+ width int
+ tokens []varToken
+}
+
+type lexState func(*varLexer) lexState
+
+func varLex(s string) []varToken {
+ l := &varLexer{input: s}
+ l.run()
+ return l.tokens
+}
+
+func (l *varLexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *varLexer) backup() {
+ l.pos -= l.width
+}
+
+func (l *varLexer) emit(t varTokenType) {
+ l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
+ l.start = l.pos
+}
+
+func (l *varLexer) errorf(format string, v ...interface{}) lexState {
+ l.tokens = append(l.tokens, varToken{
+ tokError,
+ fmt.Sprintf(format, v...),
+ })
+ return nil
+}
+
+func (l *varLexer) ignore() {
+ l.start = l.pos
+}
+
+func (l *varLexer) next() rune {
+ var r rune
+
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return -1
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+func (l *varLexer) run() {
+ for state := varLexNormal; state != nil; {
+ state = state(l)
+ }
+}
+
+func (l *varLexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func varLexNormal(l *varLexer) lexState {
+ for {
+ r := l.next()
+ switch {
+ case r == -1:
+ l.emit(tokEOF)
+ return nil
+ case r == '[':
+ l.emit(tokArrayStart)
+ case r == ']':
+ l.emit(tokArrayEnd)
+ case r == '{':
+ l.emit(tokDictStart)
+ case r == '}':
+ l.emit(tokDictEnd)
+ case r == '<':
+ l.emit(tokVariantStart)
+ case r == '>':
+ l.emit(tokVariantEnd)
+ case r == ':':
+ l.emit(tokColon)
+ case r == ',':
+ l.emit(tokComma)
+ case r == '\'' || r == '"':
+ l.backup()
+ return varLexString
+ case r == '@':
+ l.backup()
+ return varLexType
+ case unicode.IsSpace(r):
+ l.ignore()
+ case unicode.IsNumber(r) || r == '+' || r == '-':
+ l.backup()
+ return varLexNumber
+ case r == 'b':
+ pos := l.start
+ if n := l.peek(); n == '"' || n == '\'' {
+ return varLexByteString
+ }
+ // not a byte string; try to parse it as a type or bool below
+ l.pos = pos + 1
+ l.width = 1
+ fallthrough
+ default:
+ // either a bool or a type. Try bools first.
+ l.backup()
+ if l.pos+4 <= len(l.input) {
+ if l.input[l.pos:l.pos+4] == "true" {
+ l.pos += 4
+ l.emit(tokBool)
+ continue
+ }
+ }
+ if l.pos+5 <= len(l.input) {
+ if l.input[l.pos:l.pos+5] == "false" {
+ l.pos += 5
+ l.emit(tokBool)
+ continue
+ }
+ }
+ // must be a type.
+ return varLexType
+ }
+ }
+}
+
+var varTypeMap = map[string]string{
+ "boolean": "b",
+ "byte": "y",
+ "int16": "n",
+ "uint16": "q",
+ "int32": "i",
+ "uint32": "u",
+ "int64": "x",
+ "uint64": "t",
+	"double":     "d",
+ "string": "s",
+ "objectpath": "o",
+ "signature": "g",
+}
+
+func varLexByteString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated bytestring")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokByteString)
+ return varLexNormal
+}
+
+func varLexNumber(l *varLexer) lexState {
+ l.accept("+-")
+ digits := "0123456789"
+ if l.accept("0") {
+ if l.accept("x") {
+ digits = "0123456789abcdefABCDEF"
+ } else {
+ digits = "01234567"
+ }
+ }
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ if l.accept(".") {
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ for strings.IndexRune("0123456789", l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if r := l.peek(); unicode.IsLetter(r) {
+ l.next()
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokNumber)
+ return varLexNormal
+}
+
+func varLexString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated string")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokString)
+ return varLexNormal
+}
+
+func varLexType(l *varLexer) lexState {
+ at := l.accept("@")
+ for {
+ r := l.next()
+ if r == -1 {
+ break
+ }
+ if unicode.IsSpace(r) {
+ l.backup()
+ break
+ }
+ }
+ if at {
+ if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
+ return l.errorf("%s", err)
+ }
+ } else {
+ if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
+ l.emit(tokType)
+ return varLexNormal
+ }
+ return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokType)
+ return varLexNormal
+}
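
Since varLex and its token types are unexported, the sketch below would have to live inside the package (in a _test.go file, say); it shows the token stream produced for a small input, assuming "fmt" is imported:

```go
// in-package sketch; varLex and varToken are not part of the public API
func dumpTokens(s string) {
	for _, tok := range varLex(s) {
		fmt.Printf("%d %q\n", tok.typ, tok.val)
	}
}

// dumpTokens("[1, 'two']") emits, in order:
//   tokArrayStart "[", tokNumber "1", tokComma ",",
//   tokString "'two'", tokArrayEnd "]", tokEOF ""
```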
diff --git a/vendor/github.com/godbus/dbus/variant_parser.go b/vendor/github.com/godbus/dbus/variant_parser.go
new file mode 100644
index 000000000..d20f5da6d
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/variant_parser.go
@@ -0,0 +1,817 @@
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type varParser struct {
+ tokens []varToken
+ i int
+}
+
+func (p *varParser) backup() {
+ p.i--
+}
+
+func (p *varParser) next() varToken {
+ if p.i < len(p.tokens) {
+ t := p.tokens[p.i]
+ p.i++
+ return t
+ }
+ return varToken{typ: tokEOF}
+}
+
+type varNode interface {
+ Infer() (Signature, error)
+ String() string
+ Sigs() sigSet
+ Value(Signature) (interface{}, error)
+}
+
+func varMakeNode(p *varParser) (varNode, error) {
+ var sig Signature
+
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokNumber:
+ return varMakeNumNode(t, sig)
+ case tokString:
+ return varMakeStringNode(t, sig)
+ case tokBool:
+ if sig.str != "" && sig.str != "b" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := strconv.ParseBool(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return boolNode(b), nil
+ case tokArrayStart:
+ return varMakeArrayNode(p, sig)
+ case tokVariantStart:
+ return varMakeVariantNode(p, sig)
+ case tokDictStart:
+ return varMakeDictNode(p, sig)
+ case tokType:
+ if sig.str != "" {
+ return nil, errors.New("unexpected type annotation")
+ }
+ if t.val[0] == '@' {
+ sig.str = t.val[1:]
+ } else {
+ sig.str = varTypeMap[t.val]
+ }
+ case tokByteString:
+ if sig.str != "" && sig.str != "ay" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := varParseByteString(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return byteStringNode(b), nil
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+}
+
+type varTypeError struct {
+ val string
+ sig Signature
+}
+
+func (e varTypeError) Error() string {
+ return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
+}
+
+type sigSet map[Signature]bool
+
+func (s sigSet) Empty() bool {
+ return len(s) == 0
+}
+
+func (s sigSet) Intersect(s2 sigSet) sigSet {
+ r := make(sigSet)
+ for k := range s {
+ if s2[k] {
+ r[k] = true
+ }
+ }
+ return r
+}
+
+func (s sigSet) Single() (Signature, bool) {
+ if len(s) == 1 {
+ for k := range s {
+ return k, true
+ }
+ }
+ return Signature{}, false
+}
+
+func (s sigSet) ToArray() sigSet {
+ r := make(sigSet, len(s))
+ for k := range s {
+ r[Signature{"a" + k.str}] = true
+ }
+ return r
+}
+
+type numNode struct {
+ sig Signature
+ str string
+ val interface{}
+}
+
+var numSigSet = sigSet{
+ Signature{"y"}: true,
+ Signature{"n"}: true,
+ Signature{"q"}: true,
+ Signature{"i"}: true,
+ Signature{"u"}: true,
+ Signature{"x"}: true,
+ Signature{"t"}: true,
+ Signature{"d"}: true,
+}
+
+func (n numNode) Infer() (Signature, error) {
+ if strings.ContainsAny(n.str, ".e") {
+ return Signature{"d"}, nil
+ }
+ return Signature{"i"}, nil
+}
+
+func (n numNode) String() string {
+ return n.str
+}
+
+func (n numNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ if strings.ContainsAny(n.str, ".e") {
+ return sigSet{Signature{"d"}: true}
+ }
+ return numSigSet
+}
+
+func (n numNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ return varNumAs(n.str, sig)
+}
+
+func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str == "" {
+ return numNode{str: tok.val}, nil
+ }
+ num, err := varNumAs(tok.val, sig)
+ if err != nil {
+ return nil, err
+ }
+ return numNode{sig: sig, val: num}, nil
+}
+
+func varNumAs(s string, sig Signature) (interface{}, error) {
+ isUnsigned := false
+ size := 32
+ switch sig.str {
+ case "n":
+ size = 16
+ case "i":
+ case "x":
+ size = 64
+ case "y":
+ size = 8
+ isUnsigned = true
+ case "q":
+ size = 16
+ isUnsigned = true
+ case "u":
+ isUnsigned = true
+ case "t":
+ size = 64
+ isUnsigned = true
+ case "d":
+ d, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ default:
+ return nil, varTypeError{s, sig}
+ }
+ base := 10
+ if strings.HasPrefix(s, "0x") {
+ base = 16
+ s = s[2:]
+ }
+	if base == 10 && strings.HasPrefix(s, "0") && len(s) != 1 {
+ base = 8
+ s = s[1:]
+ }
+ if isUnsigned {
+ i, err := strconv.ParseUint(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "y":
+ v = byte(i)
+ case "q":
+ v = uint16(i)
+ case "u":
+ v = uint32(i)
+ }
+ return v, nil
+ }
+ i, err := strconv.ParseInt(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "n":
+ v = int16(i)
+ case "i":
+ v = int32(i)
+ }
+ return v, nil
+}
+
+type stringNode struct {
+ sig Signature
+ str string // parsed
+ val interface{} // has correct type
+}
+
+var stringSigSet = sigSet{
+ Signature{"s"}: true,
+ Signature{"g"}: true,
+ Signature{"o"}: true,
+}
+
+func (n stringNode) Infer() (Signature, error) {
+ return Signature{"s"}, nil
+}
+
+func (n stringNode) String() string {
+ return n.str
+}
+
+func (n stringNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ return stringSigSet
+}
+
+func (n stringNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ switch {
+ case sig.str == "g":
+ return Signature{n.str}, nil
+ case sig.str == "o":
+ return ObjectPath(n.str), nil
+ case sig.str == "s":
+ return n.str, nil
+ default:
+ return nil, varTypeError{n.str, sig}
+ }
+}
+
+func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
+ return nil, fmt.Errorf("invalid type %q for string", sig.str)
+ }
+ s, err := varParseString(tok.val)
+ if err != nil {
+ return nil, err
+ }
+ n := stringNode{str: s}
+ if sig.str == "" {
+ return stringNode{str: s}, nil
+ }
+ n.sig = sig
+ switch sig.str {
+ case "o":
+ n.val = ObjectPath(s)
+ case "g":
+ n.val = Signature{s}
+ case "s":
+ n.val = s
+ }
+ return n, nil
+}
+
+func varParseString(s string) (string, error) {
+ // quotes are guaranteed to be there
+ s = s[1 : len(s)-1]
+ buf := new(bytes.Buffer)
+ for len(s) != 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ if r != '\\' {
+ buf.WriteRune(r)
+ continue
+ }
+ r, size = utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ switch r {
+ case 'a':
+ buf.WriteRune(0x7)
+ case 'b':
+ buf.WriteRune(0x8)
+ case 'f':
+ buf.WriteRune(0xc)
+ case 'n':
+ buf.WriteRune('\n')
+ case 'r':
+ buf.WriteRune('\r')
+ case 't':
+ buf.WriteRune('\t')
+ case '\n':
+ case 'u':
+ if len(s) < 4 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:4], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[4:]
+ case 'U':
+ if len(s) < 8 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:8], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[8:]
+ default:
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String(), nil
+}
+
+var boolSigSet = sigSet{Signature{"b"}: true}
+
+type boolNode bool
+
+func (boolNode) Infer() (Signature, error) {
+ return Signature{"b"}, nil
+}
+
+func (b boolNode) String() string {
+ if b {
+ return "true"
+ }
+ return "false"
+}
+
+func (boolNode) Sigs() sigSet {
+ return boolSigSet
+}
+
+func (b boolNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "b" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return bool(b), nil
+}
+
+type arrayNode struct {
+ set sigSet
+ children []varNode
+ val interface{}
+}
+
+func (n arrayNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ csig, err := varInfer(v)
+ if err != nil {
+ continue
+ }
+ return Signature{"a" + csig.str}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n arrayNode) String() string {
+ s := "["
+ for i, v := range n.children {
+ s += v.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "]"
+}
+
+func (n arrayNode) Sigs() sigSet {
+ return n.set
+}
+
+func (n arrayNode) Value(sig Signature) (interface{}, error) {
+ if n.set.Empty() {
+ // no type information whatsoever, so this must be an empty slice
+ return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
+ }
+ if !n.set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
+ for i, v := range n.children {
+ rv, err := v.Value(Signature{sig.str[1:]})
+ if err != nil {
+ return nil, err
+ }
+ s.Index(i).Set(reflect.ValueOf(rv))
+ }
+ return s.Interface(), nil
+}
+
+func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
+ var n arrayNode
+ if sig.str != "" {
+ n.set = sigSet{sig: true}
+ }
+ if t := p.next(); t.typ == tokArrayEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ cn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if cset := cn.Sigs(); !cset.Empty() {
+ if n.set.Empty() {
+ n.set = cset.ToArray()
+ } else {
+ nset := cset.ToArray().Intersect(n.set)
+ if nset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
+ }
+ n.set = nset
+ }
+ }
+ n.children = append(n.children, cn)
+ switch t := p.next(); t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokArrayEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type variantNode struct {
+ n varNode
+}
+
+var variantSet = sigSet{
+ Signature{"v"}: true,
+}
+
+func (variantNode) Infer() (Signature, error) {
+ return Signature{"v"}, nil
+}
+
+func (n variantNode) String() string {
+ return "<" + n.n.String() + ">"
+}
+
+func (variantNode) Sigs() sigSet {
+ return variantSet
+}
+
+func (n variantNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "v" {
+ return nil, varTypeError{n.String(), sig}
+ }
+ sig, err := varInfer(n.n)
+ if err != nil {
+ return nil, err
+ }
+ v, err := n.n.Value(sig)
+ if err != nil {
+ return nil, err
+ }
+ return MakeVariant(v), nil
+}
+
+func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
+ n, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if t := p.next(); t.typ != tokVariantEnd {
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ vn := variantNode{n}
+ if sig.str != "" && sig.str != "v" {
+ return nil, varTypeError{vn.String(), sig}
+ }
+ return variantNode{n}, nil
+}
+
+type dictEntry struct {
+ key, val varNode
+}
+
+type dictNode struct {
+ kset, vset sigSet
+ children []dictEntry
+ val interface{}
+}
+
+func (n dictNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ ksig, err := varInfer(v.key)
+ if err != nil {
+ continue
+ }
+ vsig, err := varInfer(v.val)
+ if err != nil {
+ continue
+ }
+ return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n dictNode) String() string {
+ s := "{"
+ for i, v := range n.children {
+ s += v.key.String() + ": " + v.val.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "}"
+}
+
+func (n dictNode) Sigs() sigSet {
+ r := sigSet{}
+ for k := range n.kset {
+ for v := range n.vset {
+ sig := "a{" + k.str + v.str + "}"
+ r[Signature{sig}] = true
+ }
+ }
+ return r
+}
+
+func (n dictNode) Value(sig Signature) (interface{}, error) {
+ set := n.Sigs()
+ if set.Empty() {
+ // no type information -> empty dict
+ return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
+ }
+ if !set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ m := reflect.MakeMap(typeFor(sig.str))
+ ksig := Signature{sig.str[2:3]}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ for _, v := range n.children {
+ kv, err := v.key.Value(ksig)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := v.val.Value(vsig)
+ if err != nil {
+ return nil, err
+ }
+ m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return m.Interface(), nil
+}
+
+func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
+ var n dictNode
+
+ if sig.str != "" {
+ if len(sig.str) < 5 {
+ return nil, fmt.Errorf("invalid signature %q for dict type", sig)
+ }
+ ksig := Signature{string(sig.str[2])}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ n.kset = sigSet{ksig: true}
+ n.vset = sigSet{vsig: true}
+ }
+ if t := p.next(); t.typ == tokDictEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ kn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if kset := kn.Sigs(); !kset.Empty() {
+ if n.kset.Empty() {
+ n.kset = kset
+ } else {
+ n.kset = kset.Intersect(n.kset)
+ if n.kset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
+ }
+ }
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokColon:
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ vn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if vset := vn.Sigs(); !vset.Empty() {
+ if n.vset.Empty() {
+ n.vset = vset
+ } else {
+ n.vset = n.vset.Intersect(vset)
+ if n.vset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
+ }
+ }
+ }
+ n.children = append(n.children, dictEntry{kn, vn})
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokDictEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type byteStringNode []byte
+
+var byteStringSet = sigSet{
+ Signature{"ay"}: true,
+}
+
+func (byteStringNode) Infer() (Signature, error) {
+ return Signature{"ay"}, nil
+}
+
+func (b byteStringNode) String() string {
+ return string(b)
+}
+
+func (b byteStringNode) Sigs() sigSet {
+ return byteStringSet
+}
+
+func (b byteStringNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "ay" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return []byte(b), nil
+}
+
+func varParseByteString(s string) ([]byte, error) {
+ // quotes and b at start are guaranteed to be there
+ b := make([]byte, 0, 1)
+ s = s[2 : len(s)-1]
+ for len(s) != 0 {
+ c := s[0]
+ s = s[1:]
+ if c != '\\' {
+ b = append(b, c)
+ continue
+ }
+ c = s[0]
+ s = s[1:]
+ switch c {
+ case 'a':
+ b = append(b, 0x7)
+ case 'b':
+ b = append(b, 0x8)
+ case 'f':
+ b = append(b, 0xc)
+ case 'n':
+ b = append(b, '\n')
+ case 'r':
+ b = append(b, '\r')
+ case 't':
+ b = append(b, '\t')
+ case 'x':
+ if len(s) < 2 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:2], 16, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[2:]
+ case '0':
+ if len(s) < 3 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:3], 8, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[3:]
+ default:
+ b = append(b, c)
+ }
+ }
+ return append(b, 0), nil
+}
+
+func varInfer(n varNode) (Signature, error) {
+ if sig, ok := n.Sigs().Single(); ok {
+ return sig, nil
+ }
+ return n.Infer()
+}
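
Tying the lexer and parser together: when ParseVariant receives an empty signature, varInfer derives one from the parse tree, bottoming out in numNode's rule that a bare integer literal is an int32 and anything containing '.' or an exponent is a double. A sketch (expected types in comments):

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	v1, _ := dbus.ParseVariant("5", dbus.Signature{})
	fmt.Printf("%T\n", v1.Value()) // int32

	v2, _ := dbus.ParseVariant("5.5", dbus.Signature{})
	fmt.Printf("%T\n", v2.Value()) // float64

	// An explicit signature overrides inference.
	v3, _ := dbus.ParseVariant("5", dbus.ParseSignatureMust("t"))
	fmt.Printf("%T\n", v3.Value()) // uint64
}
```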
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 000000000..7be0cc7b6
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,36 @@
+Protocol Buffers for Go with Gadgets
+
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+http://github.com/gogo/protobuf
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README
new file mode 100644
index 000000000..b4accc0c0
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/README
@@ -0,0 +1,214 @@
+GoGoProtobuf http://github.com/gogo/protobuf extends
+GoProtobuf http://github.com/golang/protobuf
+
+# Go support for Protocol Buffers
+
+Google's data interchange format.
+Copyright 2010 The Go Authors.
+https://github.com/golang/protobuf
+
+This package and the code it generates require at least Go 1.4.
+
+This software implements Go bindings for protocol buffers. For
+information about protocol buffers themselves, see
+ https://developers.google.com/protocol-buffers/
+
+## Installation ##
+
+To use this software, you must:
+- Install the standard C++ implementation of protocol buffers from
+ https://developers.google.com/protocol-buffers/
+- Of course, install the Go compiler and tools from
+ https://golang.org/
+ See
+ https://golang.org/doc/install
+ for details or, if you are using gccgo, follow the instructions at
+ https://golang.org/doc/install/gccgo
+- Grab the code from the repository and install the proto package.
+ The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`.
+ The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
+ defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
+ compiler, protoc, to find it.
+
+This software has two parts: a 'protocol compiler plugin' that
+generates Go source files that, once compiled, can access and manage
+protocol buffers; and a library that implements run-time support for
+encoding (marshaling), decoding (unmarshaling), and accessing protocol
+buffers.
+
+There is support for gRPC in Go using protocol buffers.
+See the note at the bottom of this file for details.
+
+There are no insertion points in the plugin.
+
+GoGoProtobuf provides extensions for protocol buffers and GoProtobuf;
+see http://github.com/gogo/protobuf/gogoproto/doc.go
+
+## Using protocol buffers with Go ##
+
+Once the software is installed, there are two steps to using it.
+First you must compile the protocol buffer definitions and then import
+them, with the support library, into your program.
+
+To compile the protocol buffer definition, run protoc with the --gogo_out
+parameter set to the directory you want to output the Go code to.
+
+ protoc --gogo_out=. *.proto
+
+The generated files will be suffixed .pb.go. See the Test code below
+for an example using such a file.
+
+The package comment for the proto library contains text describing
+the interface provided in Go for protocol buffers. Here is an edited
+version.
+
+If you are using any gogo.proto extensions you will need to specify the
+proto_path to include descriptor.proto and gogo.proto.
+gogo.proto is located in github.com/gogo/protobuf/gogoproto;
+this should be fine, since its import path matches its location.
+descriptor.proto is located in either github.com/gogo/protobuf/protobuf
+or code.google.com/p/protobuf/trunk/src/.
+Its import is google/protobuf/descriptor.proto, so protoc may need an
+extra -I entry (as below) to resolve it.
+
+ protoc --gogo_out=. -I=.:github.com/gogo/protobuf/protobuf *.proto
+
+==========
+
+The proto package converts data structures to and from the
+wire format of protocol buffers. It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+   a String method, and an Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+Consider file test.proto, containing
+
+```proto
+ package example;
+
+ enum FOO { X = 17; };
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ }
+```
+
+To create and play with a Test object from the example package,
+
+```go
+ package main
+
+ import (
+ "log"
+
+ "github.com/gogo/protobuf/proto"
+ "path/to/example"
+ )
+
+ func main() {
+ test := &example.Test {
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &example.Test_OptionalGroup {
+ RequiredField: proto.String("good bye"),
+ },
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &example.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+```
+
+
+## Parameters ##
+
+To pass extra parameters to the plugin, use a comma-separated
+parameter list separated from the output directory by a colon:
+
+
+ protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto
+
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+ all imports. Useful for things like generating protos in a
+ subdirectory, or regenerating vendored protobufs in-place.
+- `import_path=foo/bar` - used as the package if no input files
+ declare `go_package`. If it contains slashes, everything up to the
+ rightmost slash is ignored.
+- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
+ load. The only plugin in this repo is `grpc`.
+- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
+ associated with Go package quux/shme. This is subject to the
+ import_prefix parameter.
+
+## gRPC Support ##
+
+If a proto file specifies RPC services, protoc-gen-go can be instructed to
+generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
+the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
+the --go_out argument to protoc:
+
+ protoc --gogo_out=plugins=grpc:. *.proto
+
+## Plugins ##
+
+The `protoc-gen-go/generator` package exposes a plugin interface,
+which is used by the gRPC code generation. This interface is not
+supported and is subject to incompatible changes without notice.
diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md
new file mode 100644
index 000000000..00b346f23
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/Readme.md
@@ -0,0 +1,119 @@
+# Protocol Buffers for Go with Gadgets
+
+[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf)
+
+gogoprotobuf is a fork of <a href="https://github.com/golang/protobuf">golang/protobuf</a> with extra code generation features.
+
+This code generation is used to achieve:
+
+ - fast marshalling and unmarshalling
+ - more canonical Go structures
+ - goprotobuf compatibility
+ - less typing by optionally generating extra helper code
+ - peace of mind by optionally generating test and benchmark code
+ - other serialization formats
+
+Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this
+<a href="https://github.com/gogo/protobuf/issues/191">issue</a>
+
+## Users
+
+These projects use gogoprotobuf:
+
+ - <a href="http://godoc.org/github.com/coreos/etcd">etcd</a> - <a href="https://blog.gopheracademy.com/advent-2015/etcd-distributed-key-value-store-with-grpc-http2/">blog</a>
+ - <a href="https://www.spacemonkey.com/">spacemonkey</a> - <a href="https://www.spacemonkey.com/blog/posts/go-space-monkey">blog</a>
+ - <a href="http://bazil.org">bazil</a>
+ - <a href="http://badoo.com">badoo</a>
+ - <a href="https://github.com/mesos/mesos-go">mesos-go</a>
+ - <a href="https://github.com/mozilla-services/heka">heka</a>
+ - <a href="https://github.com/cockroachdb/cockroach">cockroachdb</a>
+ - <a href="https://github.com/jbenet/go-ipfs">go-ipfs</a>
+ - <a href="https://github.com/philhofer/rkive">rkive-go</a>
+ - <a href="https://www.dropbox.com">dropbox</a>
+ - <a href="https://srclib.org/">srclib</a> - <a href="https://sourcegraph.com/sourcegraph.com/sourcegraph/srclib@97f54fed4f9a4bff0a28edf6eb7c0e013afc7bcd/.tree/graph/def.proto">sample proto file</a>
+ - <a href="http://www.adyoulike.com/">adyoulike</a>
+ - <a href="http://www.cloudfoundry.org/">cloudfoundry</a>
+ - <a href="http://kubernetes.io/">kubernetes</a>
+ - <a href="https://dgraph.io/">dgraph</a> - <a href="https://github.com/dgraph-io/dgraph/releases/tag/v0.4.3">release notes</a> - <a href="https://discuss.dgraph.io/t/gogoprotobuf-is-extremely-fast/639">benchmarks</a></a>
+ - <a href="https://github.com/centrifugal/centrifugo">centrifugo</a> - <a href="https://forum.golangbridge.org/t/centrifugo-real-time-messaging-websocket-or-sockjs-server-v1-5-0-released/2861">release notes</a> - <a href="https://medium.com/@fzambia/centrifugo-protobuf-inside-json-outside-21d39bdabd68#.o3icmgjqd">blog</a>
+
+Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
+
+### Mentioned
+
+ - <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a>
+ - <a href="http://gophercon.sourcegraph.com/post/83747547505/writing-a-high-performance-database-in-go">gophercon</a>
+ - <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a>
+
+## Getting Started
+
+There are several ways to use gogoprotobuf, but for all of them you need to install Go and protoc.
+After that you can choose:
+
+ - Speed
+ - More Speed and more generated code
+ - Most Speed and most customization
+
+### Installation
+
+To install it, you must first have Go (at least version 1.3.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.4.2, 1.5.4, 1.6.3 and 1.7 are continuously tested.
+
+Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
+Most versions from 2.3.1 should not give any problems, but 2.5.0, 2.6.1 and 3 are continuously tested.
+
+### Speed
+
+Install the protoc-gen-gofast binary
+
+ go get github.com/gogo/protobuf/protoc-gen-gofast
+
+Use it to generate faster marshaling and unmarshaling Go code for your protocol buffers.
+
+ protoc --gofast_out=. myproto.proto
+
+This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
+
+### More Speed and more generated code
+
+Fields without pointers mean less time spent in the garbage collector.
+More code generation results in more convenient methods.
+
+Other binaries are also included:
+
+ protoc-gen-gogofast (same as gofast, but imports gogoprotobuf)
+ protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields)
+ protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods)
+
+Installing any of these binaries is easy. Simply run:
+
+ go get github.com/gogo/protobuf/proto
+ go get github.com/gogo/protobuf/{binary}
+ go get github.com/gogo/protobuf/gogoproto
+
+These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
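+
+Each binary maps to its own protoc plugin flag. For example, after installing
+protoc-gen-gogofaster you would run:
+
+    protoc --gogofaster_out=. myproto.proto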
+
+### Most Speed and most customization
+
+Customizing the fields of your messages to be the fields that you actually want to use removes the need to copy between the structs you work with and the structs you use to serialize.
+gogoprotobuf also offers more serialization formats, generation of tests, and even more methods.
+
+Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation.
+
+Install protoc-gen-gogo:
+
+ go get github.com/gogo/protobuf/proto
+ go get github.com/gogo/protobuf/jsonpb
+ go get github.com/gogo/protobuf/protoc-gen-gogo
+ go get github.com/gogo/protobuf/gogoproto
+
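+Once installed, protoc-gen-gogo is invoked like the other plugins, for example:
+
+    protoc --gogo_out=. myproto.proto
+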
+## Proto3
+
+Proto3 is supported, but the new well-known types are not supported yet.
+[See Proto3 Issue](https://github.com/gogo/protobuf/issues/57) for more details.
+
+## GRPC
+
+It works the same as with golang/protobuf; simply specify the plugin.
+Here is an example using gofast:
+
+ protoc --gofast_out=plugins=grpc:. my.proto
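+
+For reference, a minimal sketch of a service definition that the grpc plugin can
+consume is shown below; the Echo names are purely illustrative:
+
+    syntax = "proto3";
+
+    package my;
+
+    service Echo {
+      rpc Echo (EchoRequest) returns (EchoReply);
+    }
+
+    message EchoRequest {
+      string payload = 1;
+    }
+
+    message EchoReply {
+      string payload = 1;
+    }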
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go
new file mode 100644
index 000000000..5ecfae113
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go
@@ -0,0 +1,168 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package gogoproto provides extensions for protocol buffers to achieve:
+
+ - fast marshalling and unmarshalling.
+ - peace of mind by optionally generating test and benchmark code.
+ - more canonical Go structures.
+ - less typing by optionally generating extra helper code.
+ - goprotobuf compatibility.
+
+More Canonical Go Structures
+
+A lot of time spent working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with, and then write a function to copy the values between the two structs.
+You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
+Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
+
+ - nullable, if false, a field is generated without a pointer (see warning below).
+ - embed, if true, the field is generated as an embedded field.
+ - customtype, lets you use your own type in the struct, which works with the Marshal and Unmarshal methods but marshals to bytes, for example custom.Uuid or custom.Fixed128.
+ - customname (beta), changes the generated field name. This is especially useful when generated methods conflict with field names.
+ - casttype (beta), changes the generated field type. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
+ - castkey (beta), changes the generated field type for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+ - castvalue (beta), changes the generated field type for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+
+Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
+
+Let us look at:
+
+ github.com/gogo/protobuf/test/example/example.proto
+
+for a quicker overview.
+
+The following message:
+
+ package test;
+
+ import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+ message A {
+ optional string Description = 1 [(gogoproto.nullable) = false];
+ optional int64 Number = 2 [(gogoproto.nullable) = false];
+ optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+ }
+
+will generate a Go struct which looks a lot like this:
+
+ type A struct {
+ Description string
+ Number int64
+ Id github_com_gogo_protobuf_test_custom.Uuid
+ }
+
+You will see there are no pointers, since all fields are non-nullable.
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
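+
+As a rough sketch, assuming the method set described on the extensions page, a custom
+type provides its own marshalling along these lines (the Uuid shown here is purely
+illustrative):
+
+	// Uuid is an illustrative 16 byte custom type.
+	type Uuid [16]byte
+
+	func (u Uuid) Marshal() ([]byte, error)           { return u[:], nil }
+	func (u Uuid) MarshalTo(data []byte) (int, error) { return copy(data, u[:]), nil }
+	func (u *Uuid) Unmarshal(data []byte) error       { copy(u[:], data); return nil }
+	func (u Uuid) Size() int                          { return len(u) }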
+
+Next we will embed the message A in message B.
+
+ message B {
+ optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+ repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+ }
+
+See below that A is embedded in B.
+
+ type B struct {
+ A
+ G []github_com_gogo_protobuf_test_custom.Uint128
+ }
+
+Also see the repeated custom type.
+
+ type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+ message C {
+ optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+ }
+
+See below that the field's name is MySize and not Size.
+
+ type C struct {
+ MySize *int64
+ }
+
+This is useful when a protocol buffer message has a field name which conflicts with a generated method.
+As an example, having a field named size and using the sizer plugin to generate a Size method will cause a Go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the Go language were available.
+
+Gogoprotobuf also has some more subtle changes; these could be changed back:
+
+ - the generated package names for imports do not have the extra /filename.pb,
+   but are the imports actually specified in the .proto file.
+
+Gogoprotobuf also has lost some features which should be brought back with time:
+
+ - marshalling and unmarshalling with reflect and without the unsafe package;
+   this requires work in pointer_reflect.go.
+
+Why nullable breaks the protocol buffer specification:
+
+The protocol buffer specification states that you should be able to tell whether a
+field is set or unset. With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set. It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+ - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+ - goproto_enum_prefix, if false, generates the enum constant names without the message type prefix.
+ - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method; this is useful if you would rather use enum_stringer or write your own String method.
+ - goproto_getters, if false, the message is generated without get methods; this is useful when you would rather use face.
+ - goproto_stringer, if false, the message is generated without the default string method; this is useful if you would rather use stringer or write your own String method.
+ - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension.
+ - goproto_unrecognized (beta), if false, the XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+
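+For example, a minimal sketch applying the file-wide variants of these options,
+using the *_all extensions defined in gogo.proto:
+
+	import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+	option (gogoproto.goproto_getters_all) = false;
+	option (gogoproto.goproto_stringer_all) = false;
+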
+Less Typing and Peace of Mind are explained in the godoc of their specific plugin folders:
+
+ - github.com/gogo/protobuf/plugin/<extension_name>
+
+If you do not use any of these extensions, the code that is generated
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+ github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
new file mode 100644
index 000000000..6da0e3e7f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -0,0 +1,665 @@
+// Code generated by protoc-gen-gogo.
+// source: gogo.proto
+// DO NOT EDIT!
+
+/*
+Package gogoproto is a generated protocol buffer package.
+
+It is generated from these files:
+ gogo.proto
+
+It has these top-level messages:
+*/
+package gogoproto
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 62001,
+ Name: "gogoproto.goproto_enum_prefix",
+ Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix",
+}
+
+var E_GoprotoEnumStringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 62021,
+ Name: "gogoproto.goproto_enum_stringer",
+ Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer",
+}
+
+var E_EnumStringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 62022,
+ Name: "gogoproto.enum_stringer",
+ Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer",
+}
+
+var E_EnumCustomname = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 62023,
+ Name: "gogoproto.enum_customname",
+ Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname",
+}
+
+var E_EnumvalueCustomname = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 66001,
+ Name: "gogoproto.enumvalue_customname",
+ Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname",
+}
+
+var E_GoprotoGettersAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63001,
+ Name: "gogoproto.goproto_getters_all",
+ Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll",
+}
+
+var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63002,
+ Name: "gogoproto.goproto_enum_prefix_all",
+ Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll",
+}
+
+var E_GoprotoStringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63003,
+ Name: "gogoproto.goproto_stringer_all",
+ Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll",
+}
+
+var E_VerboseEqualAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63004,
+ Name: "gogoproto.verbose_equal_all",
+ Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll",
+}
+
+var E_FaceAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63005,
+ Name: "gogoproto.face_all",
+ Tag: "varint,63005,opt,name=face_all,json=faceAll",
+}
+
+var E_GostringAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63006,
+ Name: "gogoproto.gostring_all",
+ Tag: "varint,63006,opt,name=gostring_all,json=gostringAll",
+}
+
+var E_PopulateAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63007,
+ Name: "gogoproto.populate_all",
+ Tag: "varint,63007,opt,name=populate_all,json=populateAll",
+}
+
+var E_StringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63008,
+ Name: "gogoproto.stringer_all",
+ Tag: "varint,63008,opt,name=stringer_all,json=stringerAll",
+}
+
+var E_OnlyoneAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63009,
+ Name: "gogoproto.onlyone_all",
+ Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll",
+}
+
+var E_EqualAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63013,
+ Name: "gogoproto.equal_all",
+ Tag: "varint,63013,opt,name=equal_all,json=equalAll",
+}
+
+var E_DescriptionAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63014,
+ Name: "gogoproto.description_all",
+ Tag: "varint,63014,opt,name=description_all,json=descriptionAll",
+}
+
+var E_TestgenAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63015,
+ Name: "gogoproto.testgen_all",
+ Tag: "varint,63015,opt,name=testgen_all,json=testgenAll",
+}
+
+var E_BenchgenAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63016,
+ Name: "gogoproto.benchgen_all",
+ Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll",
+}
+
+var E_MarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63017,
+ Name: "gogoproto.marshaler_all",
+ Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll",
+}
+
+var E_UnmarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63018,
+ Name: "gogoproto.unmarshaler_all",
+ Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll",
+}
+
+var E_StableMarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63019,
+ Name: "gogoproto.stable_marshaler_all",
+ Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll",
+}
+
+var E_SizerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63020,
+ Name: "gogoproto.sizer_all",
+ Tag: "varint,63020,opt,name=sizer_all,json=sizerAll",
+}
+
+var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63021,
+ Name: "gogoproto.goproto_enum_stringer_all",
+ Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll",
+}
+
+var E_EnumStringerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63022,
+ Name: "gogoproto.enum_stringer_all",
+ Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll",
+}
+
+var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63023,
+ Name: "gogoproto.unsafe_marshaler_all",
+ Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll",
+}
+
+var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63024,
+ Name: "gogoproto.unsafe_unmarshaler_all",
+ Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll",
+}
+
+var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63025,
+ Name: "gogoproto.goproto_extensions_map_all",
+ Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll",
+}
+
+var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63026,
+ Name: "gogoproto.goproto_unrecognized_all",
+ Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll",
+}
+
+var E_GogoprotoImport = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63027,
+ Name: "gogoproto.gogoproto_import",
+ Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport",
+}
+
+var E_ProtosizerAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63028,
+ Name: "gogoproto.protosizer_all",
+ Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll",
+}
+
+var E_CompareAll = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FileOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 63029,
+ Name: "gogoproto.compare_all",
+ Tag: "varint,63029,opt,name=compare_all,json=compareAll",
+}
+
+var E_GoprotoGetters = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64001,
+ Name: "gogoproto.goproto_getters",
+ Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters",
+}
+
+var E_GoprotoStringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64003,
+ Name: "gogoproto.goproto_stringer",
+ Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer",
+}
+
+var E_VerboseEqual = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64004,
+ Name: "gogoproto.verbose_equal",
+ Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual",
+}
+
+var E_Face = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64005,
+ Name: "gogoproto.face",
+ Tag: "varint,64005,opt,name=face",
+}
+
+var E_Gostring = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64006,
+ Name: "gogoproto.gostring",
+ Tag: "varint,64006,opt,name=gostring",
+}
+
+var E_Populate = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64007,
+ Name: "gogoproto.populate",
+ Tag: "varint,64007,opt,name=populate",
+}
+
+var E_Stringer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 67008,
+ Name: "gogoproto.stringer",
+ Tag: "varint,67008,opt,name=stringer",
+}
+
+var E_Onlyone = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64009,
+ Name: "gogoproto.onlyone",
+ Tag: "varint,64009,opt,name=onlyone",
+}
+
+var E_Equal = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64013,
+ Name: "gogoproto.equal",
+ Tag: "varint,64013,opt,name=equal",
+}
+
+var E_Description = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64014,
+ Name: "gogoproto.description",
+ Tag: "varint,64014,opt,name=description",
+}
+
+var E_Testgen = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64015,
+ Name: "gogoproto.testgen",
+ Tag: "varint,64015,opt,name=testgen",
+}
+
+var E_Benchgen = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64016,
+ Name: "gogoproto.benchgen",
+ Tag: "varint,64016,opt,name=benchgen",
+}
+
+var E_Marshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64017,
+ Name: "gogoproto.marshaler",
+ Tag: "varint,64017,opt,name=marshaler",
+}
+
+var E_Unmarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64018,
+ Name: "gogoproto.unmarshaler",
+ Tag: "varint,64018,opt,name=unmarshaler",
+}
+
+var E_StableMarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64019,
+ Name: "gogoproto.stable_marshaler",
+ Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler",
+}
+
+var E_Sizer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64020,
+ Name: "gogoproto.sizer",
+ Tag: "varint,64020,opt,name=sizer",
+}
+
+var E_UnsafeMarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64023,
+ Name: "gogoproto.unsafe_marshaler",
+ Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler",
+}
+
+var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64024,
+ Name: "gogoproto.unsafe_unmarshaler",
+ Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler",
+}
+
+var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64025,
+ Name: "gogoproto.goproto_extensions_map",
+ Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap",
+}
+
+var E_GoprotoUnrecognized = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64026,
+ Name: "gogoproto.goproto_unrecognized",
+ Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized",
+}
+
+var E_Protosizer = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64028,
+ Name: "gogoproto.protosizer",
+ Tag: "varint,64028,opt,name=protosizer",
+}
+
+var E_Compare = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.MessageOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 64029,
+ Name: "gogoproto.compare",
+ Tag: "varint,64029,opt,name=compare",
+}
+
+var E_Nullable = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 65001,
+ Name: "gogoproto.nullable",
+ Tag: "varint,65001,opt,name=nullable",
+}
+
+var E_Embed = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 65002,
+ Name: "gogoproto.embed",
+ Tag: "varint,65002,opt,name=embed",
+}
+
+var E_Customtype = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65003,
+ Name: "gogoproto.customtype",
+ Tag: "bytes,65003,opt,name=customtype",
+}
+
+var E_Customname = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65004,
+ Name: "gogoproto.customname",
+ Tag: "bytes,65004,opt,name=customname",
+}
+
+var E_Jsontag = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65005,
+ Name: "gogoproto.jsontag",
+ Tag: "bytes,65005,opt,name=jsontag",
+}
+
+var E_Moretags = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65006,
+ Name: "gogoproto.moretags",
+ Tag: "bytes,65006,opt,name=moretags",
+}
+
+var E_Casttype = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65007,
+ Name: "gogoproto.casttype",
+ Tag: "bytes,65007,opt,name=casttype",
+}
+
+var E_Castkey = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65008,
+ Name: "gogoproto.castkey",
+ Tag: "bytes,65008,opt,name=castkey",
+}
+
+var E_Castvalue = &proto.ExtensionDesc{
+ ExtendedType: (*google_protobuf.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 65009,
+ Name: "gogoproto.castvalue",
+ Tag: "bytes,65009,opt,name=castvalue",
+}
+
+func init() {
+ proto.RegisterExtension(E_GoprotoEnumPrefix)
+ proto.RegisterExtension(E_GoprotoEnumStringer)
+ proto.RegisterExtension(E_EnumStringer)
+ proto.RegisterExtension(E_EnumCustomname)
+ proto.RegisterExtension(E_EnumvalueCustomname)
+ proto.RegisterExtension(E_GoprotoGettersAll)
+ proto.RegisterExtension(E_GoprotoEnumPrefixAll)
+ proto.RegisterExtension(E_GoprotoStringerAll)
+ proto.RegisterExtension(E_VerboseEqualAll)
+ proto.RegisterExtension(E_FaceAll)
+ proto.RegisterExtension(E_GostringAll)
+ proto.RegisterExtension(E_PopulateAll)
+ proto.RegisterExtension(E_StringerAll)
+ proto.RegisterExtension(E_OnlyoneAll)
+ proto.RegisterExtension(E_EqualAll)
+ proto.RegisterExtension(E_DescriptionAll)
+ proto.RegisterExtension(E_TestgenAll)
+ proto.RegisterExtension(E_BenchgenAll)
+ proto.RegisterExtension(E_MarshalerAll)
+ proto.RegisterExtension(E_UnmarshalerAll)
+ proto.RegisterExtension(E_StableMarshalerAll)
+ proto.RegisterExtension(E_SizerAll)
+ proto.RegisterExtension(E_GoprotoEnumStringerAll)
+ proto.RegisterExtension(E_EnumStringerAll)
+ proto.RegisterExtension(E_UnsafeMarshalerAll)
+ proto.RegisterExtension(E_UnsafeUnmarshalerAll)
+ proto.RegisterExtension(E_GoprotoExtensionsMapAll)
+ proto.RegisterExtension(E_GoprotoUnrecognizedAll)
+ proto.RegisterExtension(E_GogoprotoImport)
+ proto.RegisterExtension(E_ProtosizerAll)
+ proto.RegisterExtension(E_CompareAll)
+ proto.RegisterExtension(E_GoprotoGetters)
+ proto.RegisterExtension(E_GoprotoStringer)
+ proto.RegisterExtension(E_VerboseEqual)
+ proto.RegisterExtension(E_Face)
+ proto.RegisterExtension(E_Gostring)
+ proto.RegisterExtension(E_Populate)
+ proto.RegisterExtension(E_Stringer)
+ proto.RegisterExtension(E_Onlyone)
+ proto.RegisterExtension(E_Equal)
+ proto.RegisterExtension(E_Description)
+ proto.RegisterExtension(E_Testgen)
+ proto.RegisterExtension(E_Benchgen)
+ proto.RegisterExtension(E_Marshaler)
+ proto.RegisterExtension(E_Unmarshaler)
+ proto.RegisterExtension(E_StableMarshaler)
+ proto.RegisterExtension(E_Sizer)
+ proto.RegisterExtension(E_UnsafeMarshaler)
+ proto.RegisterExtension(E_UnsafeUnmarshaler)
+ proto.RegisterExtension(E_GoprotoExtensionsMap)
+ proto.RegisterExtension(E_GoprotoUnrecognized)
+ proto.RegisterExtension(E_Protosizer)
+ proto.RegisterExtension(E_Compare)
+ proto.RegisterExtension(E_Nullable)
+ proto.RegisterExtension(E_Embed)
+ proto.RegisterExtension(E_Customtype)
+ proto.RegisterExtension(E_Customname)
+ proto.RegisterExtension(E_Jsontag)
+ proto.RegisterExtension(E_Moretags)
+ proto.RegisterExtension(E_Casttype)
+ proto.RegisterExtension(E_Castkey)
+ proto.RegisterExtension(E_Castvalue)
+}
+
+func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) }
+
+var fileDescriptorGogo = []byte{
+ // 1098 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45,
+ 0x14, 0x87, 0x85, 0x70, 0xe4, 0x99, 0xe7, 0x0d, 0x8f, 0x8d, 0x09, 0x11, 0x88, 0xe4, 0xc6, 0xc9,
+ 0x39, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0xa3, 0x20, 0x0c, 0x23, 0x13, 0x07, 0x10, 0x87,
+ 0x51, 0xcf, 0xb8, 0xdc, 0x19, 0xe8, 0xee, 0x6a, 0xba, 0xba, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21,
+ 0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0,
+ 0x02, 0x98, 0x4d, 0xf2, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x1e, 0x8f, 0x54, 0x35, 0xb7,
+ 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xbf, 0x69, 0x00, 0x5f, 0xf9, 0x6a, 0x31, 0x4e,
+ 0x54, 0xaa, 0x1a, 0x75, 0xbc, 0xce, 0x2f, 0x8f, 0x1c, 0xf5, 0x95, 0xf2, 0x03, 0x79, 0x3c, 0xff,
+ 0xab, 0x93, 0x6d, 0x1f, 0xdf, 0x92, 0xba, 0x9b, 0xf4, 0xe2, 0x54, 0x25, 0xc5, 0x62, 0xf1, 0x20,
+ 0xcc, 0xd1, 0xe2, 0xb6, 0x8c, 0xb2, 0xb0, 0x1d, 0x27, 0x72, 0xbb, 0x77, 0xa9, 0x71, 0xd7, 0x62,
+ 0x41, 0x2e, 0x32, 0xb9, 0xb8, 0x16, 0x65, 0xe1, 0x43, 0x71, 0xda, 0x53, 0x91, 0x3e, 0x7c, 0xf3,
+ 0xb7, 0x5b, 0x8f, 0xde, 0x72, 0x6f, 0x6d, 0x63, 0x96, 0x50, 0xfc, 0x5f, 0x2b, 0x07, 0xc5, 0x06,
+ 0xdc, 0x5e, 0xf1, 0xe9, 0x34, 0xe9, 0x45, 0xbe, 0x4c, 0x2c, 0xc6, 0x9f, 0xc8, 0x38, 0x67, 0x18,
+ 0x1f, 0x26, 0x54, 0xac, 0xc2, 0xd4, 0x28, 0xae, 0x9f, 0xc9, 0x35, 0x29, 0x4d, 0x49, 0x13, 0x66,
+ 0x72, 0x49, 0x37, 0xd3, 0xa9, 0x0a, 0x23, 0x2f, 0x94, 0x16, 0xcd, 0x2f, 0xb9, 0xa6, 0xbe, 0x31,
+ 0x8d, 0xd8, 0x6a, 0x49, 0x89, 0xf3, 0x30, 0x8f, 0x9f, 0x5c, 0xf4, 0x82, 0x4c, 0x9a, 0xb6, 0x63,
+ 0x43, 0x6d, 0xe7, 0x71, 0x19, 0x2b, 0x7f, 0xbd, 0x32, 0x96, 0x2b, 0xe7, 0x4a, 0x81, 0xe1, 0x35,
+ 0x3a, 0xe1, 0xcb, 0x34, 0x95, 0x89, 0x6e, 0x7b, 0x41, 0x30, 0x64, 0x93, 0x67, 0x7a, 0x41, 0x69,
+ 0xbc, 0xba, 0x5b, 0xed, 0x44, 0xb3, 0x20, 0x57, 0x82, 0x40, 0x6c, 0xc2, 0x1d, 0x43, 0x3a, 0xeb,
+ 0xe0, 0xbc, 0x46, 0xce, 0xf9, 0x03, 0xdd, 0x45, 0x6d, 0x0b, 0xf8, 0xf3, 0xb2, 0x1f, 0x0e, 0xce,
+ 0x77, 0xc8, 0xd9, 0x20, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x61, 0xf6, 0xa2, 0x4c, 0x3a, 0x4a, 0xcb,
+ 0xb6, 0x7c, 0x2a, 0xf3, 0x02, 0x07, 0xdd, 0x75, 0xd2, 0xcd, 0x10, 0xb8, 0x86, 0x1c, 0xba, 0x4e,
+ 0x42, 0x6d, 0xdb, 0xeb, 0x4a, 0x07, 0xc5, 0x0d, 0x52, 0x8c, 0xe3, 0x7a, 0x44, 0x57, 0x60, 0xd2,
+ 0x57, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x12, 0x3e, 0xc1, 0x0c, 0x29, 0x62, 0x15, 0x67, 0x81, 0x97,
+ 0xba, 0xec, 0xe0, 0x3d, 0x56, 0x30, 0x43, 0x8a, 0x11, 0xca, 0xfa, 0x3e, 0x2b, 0xb4, 0x51, 0xcf,
+ 0x65, 0x98, 0x50, 0x51, 0xb0, 0xa3, 0x22, 0x97, 0x4d, 0x7c, 0x40, 0x06, 0x20, 0x04, 0x05, 0x4b,
+ 0x50, 0x77, 0x6d, 0xc4, 0x87, 0x84, 0xd7, 0x24, 0x77, 0xa0, 0x09, 0x33, 0x3c, 0x64, 0x7a, 0x2a,
+ 0x72, 0x50, 0x7c, 0x44, 0x8a, 0x69, 0x03, 0xa3, 0xdb, 0x48, 0xa5, 0x4e, 0x7d, 0xe9, 0x22, 0xf9,
+ 0x98, 0x6f, 0x83, 0x10, 0x2a, 0x65, 0x47, 0x46, 0xdd, 0x0b, 0x6e, 0x86, 0x4f, 0xb8, 0x94, 0xcc,
+ 0xa0, 0x62, 0x15, 0xa6, 0x42, 0x2f, 0xd1, 0x17, 0xbc, 0xc0, 0xa9, 0x1d, 0x9f, 0x92, 0x63, 0xb2,
+ 0x84, 0xa8, 0x22, 0x59, 0x34, 0x8a, 0xe6, 0x33, 0xae, 0x88, 0x81, 0xd1, 0xd1, 0xd3, 0xa9, 0xd7,
+ 0x09, 0x64, 0x7b, 0x14, 0xdb, 0xe7, 0x7c, 0xf4, 0x0a, 0x76, 0xdd, 0x34, 0x2e, 0x41, 0x5d, 0xf7,
+ 0x2e, 0x3b, 0x69, 0xbe, 0xe0, 0x4e, 0xe7, 0x00, 0xc2, 0x8f, 0xc1, 0x9d, 0x43, 0x47, 0xbd, 0x83,
+ 0xec, 0x4b, 0x92, 0x2d, 0x0c, 0x19, 0xf7, 0x34, 0x12, 0x46, 0x55, 0x7e, 0xc5, 0x23, 0x41, 0x0e,
+ 0xb8, 0x5a, 0x30, 0x9f, 0x45, 0xda, 0xdb, 0x1e, 0xad, 0x6a, 0x5f, 0x73, 0xd5, 0x0a, 0xb6, 0x52,
+ 0xb5, 0x73, 0xb0, 0x40, 0xc6, 0xd1, 0xfa, 0xfa, 0x0d, 0x0f, 0xd6, 0x82, 0xde, 0xac, 0x76, 0xf7,
+ 0x71, 0x38, 0x52, 0x96, 0xf3, 0x52, 0x2a, 0x23, 0x8d, 0x4c, 0x3b, 0xf4, 0x62, 0x07, 0xf3, 0x4d,
+ 0x32, 0xf3, 0xc4, 0x5f, 0x2b, 0x05, 0xeb, 0x5e, 0x8c, 0xf2, 0x47, 0xe1, 0x30, 0xcb, 0xb3, 0x28,
+ 0x91, 0x5d, 0xe5, 0x47, 0xbd, 0xcb, 0x72, 0xcb, 0x41, 0xfd, 0xed, 0x40, 0xab, 0x36, 0x0d, 0x1c,
+ 0xcd, 0x67, 0xe1, 0xb6, 0xf2, 0xf7, 0x46, 0xbb, 0x17, 0xc6, 0x2a, 0x49, 0x2d, 0xc6, 0xef, 0xb8,
+ 0x53, 0x25, 0x77, 0x36, 0xc7, 0xc4, 0x1a, 0x4c, 0xe7, 0x7f, 0xba, 0x3e, 0x92, 0xdf, 0x93, 0x68,
+ 0xaa, 0x4f, 0xd1, 0xe0, 0xe8, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xf9, 0xf7, 0x03, 0x0f, 0x0e, 0x42,
+ 0x8a, 0xa7, 0x6f, 0x66, 0x20, 0x89, 0x1b, 0xf7, 0x1c, 0x90, 0xac, 0x4b, 0xad, 0x3d, 0xbf, 0xf4,
+ 0x3c, 0xbd, 0x47, 0x67, 0xb6, 0x1a, 0xc4, 0xe2, 0x01, 0x2c, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x2b,
+ 0x7b, 0x65, 0x85, 0x2a, 0x69, 0x29, 0xce, 0xc0, 0x54, 0x25, 0x2a, 0xed, 0xaa, 0x67, 0x48, 0x35,
+ 0x69, 0x26, 0xa5, 0x38, 0x01, 0x63, 0x18, 0x7b, 0x76, 0xfc, 0x59, 0xc2, 0xf3, 0xe5, 0xe2, 0x14,
+ 0xd4, 0x38, 0xee, 0xec, 0xe8, 0x73, 0x84, 0x96, 0x08, 0xe2, 0x1c, 0x75, 0x76, 0xfc, 0x79, 0xc6,
+ 0x19, 0x41, 0xdc, 0xbd, 0x84, 0x3f, 0xbe, 0x38, 0x46, 0xe3, 0x8a, 0x6b, 0xb7, 0x04, 0xe3, 0x94,
+ 0x71, 0x76, 0xfa, 0x05, 0xfa, 0x72, 0x26, 0xc4, 0x7d, 0x70, 0xc8, 0xb1, 0xe0, 0x2f, 0x11, 0x5a,
+ 0xac, 0x17, 0xab, 0x30, 0x61, 0xe4, 0x9a, 0x1d, 0x7f, 0x99, 0x70, 0x93, 0xc2, 0xad, 0x53, 0xae,
+ 0xd9, 0x05, 0xaf, 0xf0, 0xd6, 0x89, 0xc0, 0xb2, 0x71, 0xa4, 0xd9, 0xe9, 0x57, 0xb9, 0xea, 0x8c,
+ 0x88, 0x65, 0xa8, 0x97, 0x63, 0xca, 0xce, 0xbf, 0x46, 0x7c, 0x9f, 0xc1, 0x0a, 0x18, 0x63, 0xd2,
+ 0xae, 0x78, 0x9d, 0x2b, 0x60, 0x50, 0x78, 0x8c, 0x06, 0xa3, 0xcf, 0x6e, 0x7a, 0x83, 0x8f, 0xd1,
+ 0x40, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc9, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0x06,
+ 0xb3, 0xc4, 0xee, 0x78, 0x8b, 0xb7, 0x31, 0x10, 0x25, 0xa2, 0x05, 0x8d, 0x83, 0x39, 0x62, 0xf7,
+ 0xbd, 0x4d, 0xbe, 0xd9, 0x03, 0x31, 0x22, 0x1e, 0x81, 0x85, 0xe1, 0x19, 0x62, 0xb7, 0x5e, 0xdd,
+ 0x1b, 0xf8, 0xd5, 0x6f, 0x46, 0x88, 0x38, 0xd7, 0xff, 0xd5, 0x6f, 0xe6, 0x87, 0x5d, 0x7b, 0x6d,
+ 0xaf, 0xfa, 0x62, 0x67, 0xc6, 0x87, 0x58, 0x01, 0xe8, 0x8f, 0x6e, 0xbb, 0xeb, 0x3a, 0xb9, 0x0c,
+ 0x08, 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x83, 0x8f, 0x06, 0x11, 0x62, 0x09, 0x6a, 0x51, 0x16,
+ 0x04, 0xf8, 0x70, 0x34, 0xee, 0x1e, 0x12, 0x13, 0x32, 0xd8, 0x62, 0xf6, 0xf7, 0x7d, 0x3a, 0x18,
+ 0x0c, 0x88, 0x13, 0x70, 0x48, 0x86, 0x1d, 0xb9, 0x65, 0x23, 0xff, 0xd8, 0xe7, 0x81, 0x80, 0xab,
+ 0xc5, 0x32, 0x40, 0xf1, 0xd2, 0x98, 0xee, 0xc4, 0xd6, 0x6f, 0xfd, 0x73, 0xbf, 0x78, 0x07, 0x35,
+ 0x90, 0xbe, 0x20, 0x7f, 0xeb, 0xb4, 0x08, 0x76, 0xab, 0x82, 0xfc, 0x45, 0xf3, 0x24, 0x8c, 0x3f,
+ 0xa1, 0x55, 0x94, 0x7a, 0xbe, 0x8d, 0xfe, 0x8b, 0x68, 0x5e, 0x8f, 0x05, 0x0b, 0x55, 0x22, 0x53,
+ 0xcf, 0xd7, 0x36, 0xf6, 0x6f, 0x62, 0x4b, 0x00, 0xe1, 0xae, 0xa7, 0x53, 0x97, 0xfb, 0xfe, 0x87,
+ 0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xdc, 0xb1, 0xb1, 0xff, 0xf2, 0xa6, 0x69, 0xbd, 0x38,
+ 0x05, 0x75, 0xbc, 0xcc, 0xdf, 0xb7, 0x6d, 0xf0, 0x7f, 0x04, 0xf7, 0x89, 0xd3, 0xc7, 0x60, 0xae,
+ 0xab, 0xc2, 0x41, 0xec, 0x34, 0x34, 0x55, 0x53, 0xb5, 0xf2, 0x07, 0xf1, 0xff, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00,
+}
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
new file mode 100644
index 000000000..18a58c5d4
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
@@ -0,0 +1,122 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+package gogoproto;
+
+import "google/protobuf/descriptor.proto";
+
+option java_package = "com.google.protobuf";
+option java_outer_classname = "GoGoProtos";
+
+extend google.protobuf.EnumOptions {
+ optional bool goproto_enum_prefix = 62001;
+ optional bool goproto_enum_stringer = 62021;
+ optional bool enum_stringer = 62022;
+ optional string enum_customname = 62023;
+}
+
+extend google.protobuf.EnumValueOptions {
+ optional string enumvalue_customname = 66001;
+}
+
+extend google.protobuf.FileOptions {
+ optional bool goproto_getters_all = 63001;
+ optional bool goproto_enum_prefix_all = 63002;
+ optional bool goproto_stringer_all = 63003;
+ optional bool verbose_equal_all = 63004;
+ optional bool face_all = 63005;
+ optional bool gostring_all = 63006;
+ optional bool populate_all = 63007;
+ optional bool stringer_all = 63008;
+ optional bool onlyone_all = 63009;
+
+ optional bool equal_all = 63013;
+ optional bool description_all = 63014;
+ optional bool testgen_all = 63015;
+ optional bool benchgen_all = 63016;
+ optional bool marshaler_all = 63017;
+ optional bool unmarshaler_all = 63018;
+ optional bool stable_marshaler_all = 63019;
+
+ optional bool sizer_all = 63020;
+
+ optional bool goproto_enum_stringer_all = 63021;
+ optional bool enum_stringer_all = 63022;
+
+ optional bool unsafe_marshaler_all = 63023;
+ optional bool unsafe_unmarshaler_all = 63024;
+
+ optional bool goproto_extensions_map_all = 63025;
+ optional bool goproto_unrecognized_all = 63026;
+ optional bool gogoproto_import = 63027;
+ optional bool protosizer_all = 63028;
+ optional bool compare_all = 63029;
+}
+
+extend google.protobuf.MessageOptions {
+ optional bool goproto_getters = 64001;
+ optional bool goproto_stringer = 64003;
+ optional bool verbose_equal = 64004;
+ optional bool face = 64005;
+ optional bool gostring = 64006;
+ optional bool populate = 64007;
+ optional bool stringer = 67008;
+ optional bool onlyone = 64009;
+
+ optional bool equal = 64013;
+ optional bool description = 64014;
+ optional bool testgen = 64015;
+ optional bool benchgen = 64016;
+ optional bool marshaler = 64017;
+ optional bool unmarshaler = 64018;
+ optional bool stable_marshaler = 64019;
+
+ optional bool sizer = 64020;
+
+ optional bool unsafe_marshaler = 64023;
+ optional bool unsafe_unmarshaler = 64024;
+
+ optional bool goproto_extensions_map = 64025;
+ optional bool goproto_unrecognized = 64026;
+
+ optional bool protosizer = 64028;
+ optional bool compare = 64029;
+}
+
+extend google.protobuf.FieldOptions {
+ optional bool nullable = 65001;
+ optional bool embed = 65002;
+ optional string customtype = 65003;
+ optional string customname = 65004;
+ optional string jsontag = 65005;
+ optional string moretags = 65006;
+ optional string casttype = 65007;
+ optional string castkey = 65008;
+ optional string castvalue = 65009;
+}
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go
new file mode 100644
index 000000000..670021fed
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go
@@ -0,0 +1,310 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gogoproto
+
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+import proto "github.com/gogo/protobuf/proto"
+
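+// IsEmbed reports whether the embed extension is set on the field.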
+func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
+ return proto.GetBoolExtension(field.Options, E_Embed, false)
+}
+
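+// IsNullable reports whether the field is generated as a pointer; it defaults
+// to true when the nullable extension is unset.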
+func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
+ return proto.GetBoolExtension(field.Options, E_Nullable, true)
+}
+
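+// NeedsNilCheck reports whether generated code must guard the field with a nil
+// check, given the syntax version and the field's options.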
+func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
+ nullable := IsNullable(field)
+ if field.IsMessage() || IsCustomType(field) {
+ return nullable
+ }
+ if proto3 {
+ return false
+ }
+ return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
+}
+
+func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
+	return GetCustomType(field) != ""
+}
+
+func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
+	return GetCastType(field) != ""
+}
+
+func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
+	return GetCastKey(field) != ""
+}
+
+func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
+	return GetCastValue(field) != ""
+}
+
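+// GetCustomType returns the value of the customtype extension, or the empty
+// string when it is not set.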
+func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Customtype)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Casttype)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Castkey)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Castvalue)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
+	return GetCustomName(field) != ""
+}
+
+func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
+	return GetEnumCustomName(field) != ""
+}
+
+func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
+	return GetEnumValueCustomName(field) != ""
+}
+
+func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Customname)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_EnumCustomname)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
+ if err == nil && v.(*string) != nil {
+ return *(v.(*string))
+ }
+ }
+ return ""
+}
+
+func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Jsontag)
+ if err == nil && v.(*string) != nil {
+ return (v.(*string))
+ }
+ }
+ return nil
+}
+
+func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
+ if field.Options != nil {
+ v, err := proto.GetExtension(field.Options, E_Moretags)
+ if err == nil && v.(*string) != nil {
+ return (v.(*string))
+ }
+ }
+ return nil
+}
+
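+// EnableFunc is the signature shared by the feature predicates below, which
+// consult a message-level option and fall back to its file-level *_all default.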
+type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool
+
+func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+ return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
+}
+
+func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
+}
+
+func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
+}
+
+func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
+}
+
+func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
+}
+
+func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
+}
+
+func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
+}
+
+func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
+}
+
+func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
+}
+
+func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
+}
+
+func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
+}
+
+func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
+}
+
+func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
+}
+
+func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
+}
+
+func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
+}
+
+func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
+}
+
+func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
+}
+
+func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
+}
+
+func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+ return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
+}
+
+func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+ return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
+}
+
+func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
+}
+
+func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
+}
+
+func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
+}
+
+func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ if IsProto3(file) {
+ return false
+ }
+ return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
+}
+
+func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
+ return file.GetSyntax() == "proto3"
+}
+
+func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
+ return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
+}
+
+func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+ return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 000000000..5d4cba4b5
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,234 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+ emOut := out.Addr().Interface().(extensionsBytes)
+ bIn := emIn.GetExtensions()
+ bOut := emOut.GetExtensions()
+ *bOut = append(*bOut, *bIn...)
+ } else if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 000000000..0d6634cc0
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,882 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data or the value overflows 64 bits.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
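A hand-checked sketch of the layout this loop undoes: 300 is 0b1_0010_1100, split into 7-bit groups from the low end, each group emitted with the high bit set as a continuation flag except the last:

	buf := []byte{0xAC, 0x02} // 0xAC = 1_0101100 (44, continue), 0x02 = 0_0000010 (2, stop)
	x, n := DecodeVarint(buf) // x == 44 | 2<<7 == 300, n == 2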
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
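A short sketch of the zigzag mapping these two decoders invert; signed values are interleaved so that small magnitudes stay small on the wire:

	//  0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
	// encode: (n << 1) ^ (n >> 63)    decode: (x >> 1) ^ -(x & 1)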
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+	// TODO: check whether more call sites can use alloc=false.
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
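A minimal sketch of the count-delimited layout DecodeRawBytes and DecodeStringBytes consume, a varint byte count followed by that many payload bytes:

	p := NewBuffer([]byte{0x03, 'f', 'o', 'o'}) // length 3, then the payload
	s, _ := p.DecodeStringBytes()               // s == "foo"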
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
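A sketch of the reset-versus-merge distinction, again using the hypothetical generated type Example with a repeated field Items (data stands for some previously encoded buffer):

	m1 := &Example{Items: []int64{1}}
	_ = Unmarshal(data, m1)      // m1 is reset first; Items holds only what data carries
	m2 := &Example{Items: []int64{1}}
	_ = UnmarshalMerge(data, m2) // Items decoded from data are appended after the existing 1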
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok {
+ if isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.GetExtensions()
+ *ext = append(*ext, o.buf[oi:o.index]...)
+ }
+ continue
+ }
+ } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
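To make the required-field accounting above concrete, a small sketch of the bitmap used for tags 1-64, one bit per tag so that a required field encoded twice is only counted once:

	var reqFields uint64
	mask := uint64(1) << uint64(tag-1) // tag 3 -> mask 0b100
	if reqFields&mask == 0 {           // first occurrence of this tag
		reqFields |= mask
		required--
	}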
+
+// Individual type decoders. For each:
+//	u is the decoded value,
+//	v is a pointer to the field (itself a pointer) in the struct.
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
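A byte-level sketch of the packed layout on the wire: a single key with wire type 2 (consumed by unmarshalType before this decoder runs), a byte count, then the values back to back with no per-element tags:

	// field 4, packed int32s [3, 270]:
	// 0x22        key: tag 4, wire type 2 (bytes)
	// 0x03        payload is 3 bytes
	// 0x03        varint 3
	// 0x8E 0x02   varint 270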
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+		// The tagcode for the key and value properties is always a single byte
+		// because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
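A byte-level sketch of the map-entry encoding this loop parses; each entry is a length-delimited pseudo-message with the key as field 1 and the value as field 2:

	// one map<int32, string> entry {3: "hi"}:
	// 0x08 0x03            key field: tag 1, varint, value 3
	// 0x12 0x02 0x68 0x69  value field: tag 2, bytes, "hi"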
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go
new file mode 100644
index 000000000..ecc63873e
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go
@@ -0,0 +1,171 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+// Decode a reference to a struct pointer.
+func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is a pointer receiver")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ bas := structPointer_FieldPointer(base, p.field)
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers ([]struct).
+func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error {
+ newBas := appendStructPointer(base, p.field, p.sstype)
+
+ if is_group {
+		panic("not supported; may be added in the future if requested")
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is not a pointer receiver.")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, newBas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers.
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_ref_struct(p, false, base)
+}
+
+func setPtrCustomType(base structPointer, f field, v interface{}) {
+ if v == nil {
+ return
+ }
+ structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer()))
+}
+
+func setCustomType(base structPointer, f field, value interface{}) {
+ if value == nil {
+ return
+ }
+ v := reflect.ValueOf(value).Elem()
+ t := reflect.TypeOf(value).Elem()
+ kind := t.Kind()
+ switch kind {
+ case reflect.Slice:
+ slice := reflect.MakeSlice(t, v.Len(), v.Cap())
+ reflect.Copy(slice, v)
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ oldHeader.Data = slice.Pointer()
+ oldHeader.Len = v.Len()
+ oldHeader.Cap = v.Cap()
+ default:
+ size := reflect.TypeOf(value).Elem().Size()
+ structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size))
+ }
+}
+
+func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ setPtrCustomType(base, p.field, custom)
+ return nil
+}
+
+func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ if custom != nil {
+ setCustomType(base, p.field, custom)
+ }
+ return nil
+}
+
+// Decode a slice of bytes ([]byte) into a slice of custom types.
+func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ newBas := appendStructPointer(base, p.field, p.ctype)
+
+ setCustomType(newBas, 0, custom)
+
+ return nil
+}
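A minimal sketch of the kind of custom type these decoders target via gogoproto's customtype extension; the name UUID and the fixed 16-byte length are illustrative assumptions, and the fragment needs "fmt" in scope:

	type UUID [16]byte

	func (u *UUID) Unmarshal(b []byte) error {
		if len(b) != 16 {
			return fmt.Errorf("uuid: expected 16 bytes, got %d", len(b))
		}
		copy(u[:], b)
		return nil
	}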
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 000000000..8c1b8fd1f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,1363 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
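A quick cross-check of the two varint helpers above, mirroring the DecodeVarint example earlier:

	b := EncodeVarint(300) // []byte{0xAC, 0x02}
	n := SizeVarint(300)   // 2, matching len(b)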
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
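A round-trip usage sketch, once more assuming the hypothetical generated type Example:

	data, err := Marshal(msg) // msg is any generated Message
	if err == nil {
		out := &Example{}
		err = Unmarshal(data, out)
	}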
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful: only values of
+// interface, map, pointer, and slice kind can report IsNil without panicking.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
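
The packed encoders above write all element varints into a scratch buffer first, then emit the tag once, the payload's byte length, and the payload. A standalone, stdlib-only sketch of that wire layout (field number 1 is an assumption for the illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	values := []int64{3, 270, 86942}

	// Stage 1: encode every element as a varint into a scratch payload,
	// mirroring the temporary Buffer used above.
	var payload []byte
	for _, v := range values {
		payload = binary.AppendUvarint(payload, uint64(v))
	}

	// Stage 2: tag once (assumed field number 1, wire type 2 = bytes),
	// then the payload's byte length, then the payload itself.
	out := []byte{1<<3 | 2}
	out = binary.AppendUvarint(out, uint64(len(payload)))
	out = append(out, payload...)

	fmt.Printf("%% x\n", out) // 0a 06 03 8e 02 9e a7 05
}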
+
+// Encode a slice of byte slices ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
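
Unlike length-delimited messages, groups are framed by start and end tags, which is why the sizer above charges one start varint and one end varint per element. A stdlib-only sketch of the framing (field numbers 1 and 2 are assumptions for the illustration):

package main

import "fmt"

func main() {
	const field = 1             // assumed outer group field number
	start := byte(field<<3 | 3) // WireStartGroup
	end := byte(field<<3 | 4)   // WireEndGroup

	inner := []byte{2<<3 | 0, 42} // one inner varint field: number 2, value 42

	out := append([]byte{start}, inner...)
	out = append(out, end)
	fmt.Printf("%% x\n", out) // 0b 10 2a 0c
}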
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+ if err := encodeExtensions(exts); err != nil {
+ return err
+ }
+ v, _ := exts.extensionsRead()
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
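
enc_map_body sorts extension numbers as ints so that repeated marshals of the same message produce identical bytes even though Go map iteration order is randomized. The same pattern in isolation:

package main

import (
	"fmt"
	"sort"
)

func main() {
	enc := map[int32][]byte{30: {0x03}, 10: {0x01}, 20: {0x02}}

	// Collect the int32 keys as ints so sort.Ints applies.
	keys := make([]int, 0, len(enc))
	for k := range enc {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)

	var out []byte
	for _, k := range keys {
		out = append(out, enc[int32(k)]...)
	}
	fmt.Printf("%% x\n", out) // 01 02 03, independent of map iteration order
}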
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
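
Per the MapFieldEntry equivalence quoted in the comment above, each map entry goes on the wire as an embedded message with key = field 1 and value = field 2. A stdlib-only sketch of one map<int32, string> entry (the outer field number 3 is an assumption for the illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Inner MapFieldEntry for key = 7, value = "hi".
	entry := []byte{1<<3 | 0, 7}     // key tag (field 1, varint), key
	entry = append(entry, 2<<3|2, 2) // value tag (field 2, bytes), value length
	entry = append(entry, "hi"...)

	// The map field itself is encoded as a repeated embedded message.
	out := []byte{3<<3 | 2} // assumed field number 3, wire type 2
	out = binary.AppendUvarint(out, uint64(len(entry)))
	out = append(out, entry...)

	fmt.Printf("%% x\n", out) // 1a 06 08 07 12 02 68 69
}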
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new reflect.Values matching the map's key and value
+// types, and structPointers suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
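
The scratch values built here let the per-element encoders, which expect **T, run against map keys and values without a fresh allocation per entry: only Set runs in the loop. The trick in isolation, using nothing but reflect:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	keyType := reflect.TypeOf(int32(0))

	keycopy := reflect.New(keyType).Elem()               // addressable K
	keyptr := reflect.New(reflect.PtrTo(keyType)).Elem() // addressable *K
	keyptr.Set(keycopy.Addr())                           // point *K at K, once

	// The same two cells are reused for every key.
	for _, k := range []int32{10, 20, 30} {
		keycopy.Set(reflect.ValueOf(k))
		fmt.Println(keyptr.Elem().Interface()) // 10, 20, 30
	}
}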
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
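
enc_len_thing optimistically reserves four bytes for the length varint, encodes the body, and then slides the body if the real varint is shorter or longer than the reservation. A worked, stdlib-only sketch of the shrink case (body "hello", so the length fits in one byte and the body moves three bytes left):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// iLen = 0: reserve four bytes for the length slot, then append the body.
	buf := []byte{0, 0, 0, 0}
	buf = append(buf, "hello"...)

	iMsg := 4
	lMsg := len(buf) - iMsg // 5
	lLen := 1               // sizeVarint(5) is one byte

	// x = lLen - (iMsg - iLen) = -3: slide the body three bytes left.
	x := lLen - iMsg
	copy(buf[iMsg+x:], buf[iMsg:iMsg+lMsg])
	buf = buf[:len(buf)+x]

	binary.PutUvarint(buf, uint64(lMsg)) // fill the now exactly-sized slot
	fmt.Printf("%% x\n", buf)            // 05 68 65 6c 6c 6f
}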
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 000000000..66e7e1630
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,354 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// http://github.com/golang/protobuf/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+ return &RequiredNotSetError{field}
+}
+
+type Sizer interface {
+ Size() int
+}
+
+func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, s...)
+ return nil
+}
+
+func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return 0
+ }
+ n += len(s)
+ return
+}
+
+// Encode a reference to a bool pointer.
+func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ x := 0
+ if v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_bool(p *Properties, base structPointer) int {
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode a reference to an int32 pointer.
+func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
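+// Encode a reference to a uint32 pointer.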
+func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a reference to an int64 pointer.
+func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_ref_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a reference to a string pointer.
+func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_ref_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// Encode a reference to a message struct.
+func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+// TODO: this is only copied, please fix this
+func size_ref_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a slice of message struct references ([]struct).
+func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ }
+ return state.err
+}
+
+// TODO: this is only copied, please fix this
+func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error {
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return ErrNil
+ }
+ custom := i.(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error {
+ custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_ref_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceAt(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return ErrNil
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ }
+ return nil
+}
+
+func size_custom_slice_bytes(p *Properties, base structPointer) (n int) {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return 0
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ }
+ return
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 000000000..8b16f951c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,296 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field).
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
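
One consequence of the floating-point caveat in the doc comment above, in isolation: Go's == on floats follows IEEE 754, so a NaN-valued field never compares equal, even to itself.

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false: a NaN field makes a message unequal to itself
}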
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 000000000..f7384baa8
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,689 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+type extensionsBytes interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ GetExtensions() *[]byte
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+ if ep, ok := p.(extendableProto); ok {
+ return ep, ok
+ }
+ if ep, ok := p.(extendableProtoV1); ok {
+ return extensionAdapter{ep}, ok
+ }
+ return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported extensionsWrite and extensionsRead methods, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
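
extensionsRead deliberately tolerates a nil map so that messages without extensions never allocate one; only extensionsWrite creates the map on first use. The same lazy pattern in miniature:

package main

import "fmt"

type lazy struct{ m map[int32]string }

// write allocates the map on first use.
func (l *lazy) write() map[int32]string {
	if l.m == nil {
		l.m = make(map[int32]string)
	}
	return l.m
}

// read may return nil; callers must tolerate that.
func (l *lazy) read() map[int32]string { return l.m }

func main() {
	var l lazy
	fmt.Println(len(l.read())) // 0 — len of a nil map is fine
	l.write()[5] = "ext"
	fmt.Println(l.read()[5]) // ext
}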
+
+type extensionRange interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem()
+var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ if ebase, ok := base.(extensionsBytes); ok {
+ clearExtension(base, id)
+ ext := ebase.GetExtensions()
+ *ext = append(*ext, b...)
+ return
+ }
+ epb, ok := extendable(base)
+ if !ok {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extensionRange, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
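
extensionProperties uses the classic check, lock, re-check pattern: a read lock serves the common cache hit, and the second lookup under the write lock handles the race where another goroutine filled the entry first. A generic sketch of the pattern:

package main

import (
	"fmt"
	"sync"
)

var cache = struct {
	sync.RWMutex
	m map[string]int
}{m: make(map[string]int)}

func get(key string, compute func() int) int {
	cache.RLock()
	if v, ok := cache.m[key]; ok {
		cache.RUnlock()
		return v
	}
	cache.RUnlock()

	cache.Lock()
	defer cache.Unlock()
	if v, ok := cache.m[key]; ok { // check again: another writer may have won
		return v
	}
	v := compute()
	cache.m[key] = v
	return v
}

func main() {
	fmt.Println(get("answer", func() int { return 42 })) // 42
}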
+
+// encodeExtensions encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return nil // fast path
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return encodeExtensionsMap(m)
+}
+
+// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionsMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ buf := *ext
+ o := 0
+ for o < len(buf) {
+ tag, n := DecodeVarint(buf[o:])
+ fieldNum := int32(tag >> 3)
+ if fieldNum == extension.Field {
+ return true
+ }
+ wireType := int(tag & 0x7)
+ o += n
+ l, err := size(buf[o:], wireType)
+ if err != nil {
+ return false
+ }
+ o += l
+ }
+ return false
+ }
+ // TODO: Check types, field numbers, etc.?
+ epb, ok := extendable(pb)
+ if !ok {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok = extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+ ext := pb.GetExtensions()
+ for offset < len(*ext) {
+ tag, n1 := DecodeVarint((*ext)[offset:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ n2, err := size((*ext)[offset+n1:], wireType)
+ if err != nil {
+ panic(err)
+ }
+ newOffset := offset + n1 + n2
+ if fieldNum == theFieldNum {
+ *ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+ return offset
+ }
+ offset = newOffset
+ }
+ return -1
+}
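
deleteExtension removes one encoded extension by splicing the tail of the byte slice over the deleted span. The splice idiom in isolation:

package main

import "fmt"

func main() {
	buf := []byte{1, 2, 3, 4, 5}
	offset, newOffset := 1, 3
	// Drop buf[offset:newOffset] by appending the tail over it.
	buf = append(buf[:offset], buf[newOffset:]...)
	fmt.Println(buf) // [1 4 5]
}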
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ clearExtension(pb, extension.Field)
+}
+
+func clearExtension(pb Message, fieldNum int32) {
+ if epb, doki := pb.(extensionsBytes); doki {
+ offset := 0
+ for offset != -1 {
+ offset = deleteExtension(epb, fieldNum, offset)
+ }
+ return
+ }
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, fieldNum)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ o := 0
+ for o < len(*ext) {
+ tag, n := DecodeVarint((*ext)[o:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size((*ext)[o+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ if fieldNum == extension.Field {
+ v, err := decodeExtension((*ext)[o:o+n+l], extension)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ }
+ o += n + l
+ }
+ return defaultExtensionValue(extension)
+ }
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non int32 reflect.value directly
+ // set it as a int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, a pointer to a basic type, or a slice.
+ // Allocate a "field" to hold the pointer/slice itself; we pass the
+ // address of this field to props.dec, which sees a zero field and a
+ // *t and interprets it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(pb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ if epb, doki := pb.(extensionsBytes); doki {
+ ClearExtension(pb, extension)
+ ext := epb.GetExtensions()
+ et := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+ p := NewBuffer(nil)
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ *ext = append(*ext, p.buf...)
+ return nil
+ }
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ *ext = []byte{}
+ return
+ }
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 000000000..ea6478f00
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,294 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+)
+
+func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
+ if reflect.ValueOf(pb).IsNil() {
+ return ifnotset
+ }
+ value, err := GetExtension(pb, extension)
+ if err != nil {
+ return ifnotset
+ }
+ if value == nil {
+ return ifnotset
+ }
+ if value.(*bool) == nil {
+ return ifnotset
+ }
+ return *(value.(*bool))
+}
+
+func (this *Extension) Equal(that *Extension) bool {
+ return bytes.Equal(this.enc, that.enc)
+}
+
+func (this *Extension) Compare(that *Extension) int {
+ return bytes.Compare(this.enc, that.enc)
+}
+
+func SizeOfInternalExtension(m extendableProto) (n int) {
+ return SizeOfExtensionMap(m.extensionsWrite())
+}
+
+func SizeOfExtensionMap(m map[int32]Extension) (n int) {
+ return extensionsMapSize(m)
+}
+
+type sortableMapElem struct {
+ field int32
+ ext Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+ s := make(sortableExtensions, 0, len(m))
+ for k, v := range m {
+ s = append(s, &sortableMapElem{field: k, ext: v})
+ }
+ return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+ sort.Sort(this)
+ ss := make([]string, len(this))
+ for i := range this {
+ ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+ }
+ return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromInternalExtension(m extendableProto) string {
+ return StringFromExtensionsMap(m.extensionsWrite())
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+ return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+ m, err := BytesToExtensionsMap(ext)
+ if err != nil {
+ panic(err)
+ }
+ return StringFromExtensionsMap(m)
+}
+
+func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
+ return EncodeExtensionMap(m.extensionsWrite(), data)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+ if err := encodeExtensionsMap(m); err != nil {
+ return 0, err
+ }
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ for _, k := range keys {
+ n += copy(data[n:], m[int32(k)].enc)
+ }
+ return n, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+ if m[id].value == nil || m[id].desc == nil {
+ return m[id].enc, nil
+ }
+ if err := encodeExtensionsMap(m); err != nil {
+ return nil, err
+ }
+ return m[id].enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+ switch wire {
+ case WireVarint:
+ _, n := DecodeVarint(buf)
+ return n, nil
+ case WireFixed64:
+ return 8, nil
+ case WireBytes:
+ v, n := DecodeVarint(buf)
+ return int(v) + n, nil
+ case WireFixed32:
+ return 4, nil
+ case WireStartGroup:
+ offset := 0
+ for {
+ u, n := DecodeVarint(buf[offset:])
+ fwire := int(u & 0x7)
+ offset += n
+ if fwire == WireEndGroup {
+ return offset, nil
+ }
+ s, err := size(buf[offset:], fwire)
+ if err != nil {
+ return 0, err
+ }
+ offset += s
+ }
+ }
+ return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
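+// BytesToExtensionsMap parses buf as a sequence of tagged, encoded
+// extension fields and returns a map from field number to raw encoding.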
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+ m := make(map[int32]Extension)
+ i := 0
+ for i < len(buf) {
+ tag, n := DecodeVarint(buf[i:])
+ if n <= 0 {
+ return nil, fmt.Errorf("unable to decode varint")
+ }
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size(buf[i+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ end := i + l + n
+ m[fieldNum] = Extension{enc: buf[i:end]}
+ i = end
+ }
+ return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+ ee := Extension{enc: make([]byte, len(e))}
+ copy(ee.enc, e)
+ return ee
+}
+
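+// AppendExtension appends the raw encoded bytes in buf to the extension
+// identified by tag on e.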
+func AppendExtension(e Message, tag int32, buf []byte) {
+ if ee, eok := e.(extensionsBytes); eok {
+ ext := ee.GetExtensions()
+ *ext = append(*ext, buf...)
+ return
+ }
+ if ee, eok := e.(extendableProto); eok {
+ m := ee.extensionsWrite()
+ ext := m[tag] // may be missing
+ ext.enc = append(ext.enc, buf...)
+ m[tag] = ext
+ }
+}
+
+func encodeExtension(e *Extension) error {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ return nil
+ }
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ return nil
+}
+
+func (this Extension) GoString() string {
+ if this.enc == nil {
+ if err := encodeExtension(&this); err != nil {
+ panic(err)
+ }
+ }
+ return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
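+// SetUnsafeExtension sets the extension with the given field number on pb,
+// resolving its descriptor from the extension map registered for pb's type.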
+func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return SetExtension(pb, desc, value)
+}
+
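+// GetUnsafeExtension returns the value of the extension with the given
+// field number on pb, resolving its descriptor from the extension map
+// registered for pb's type.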
+func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+ }
+ return GetExtension(pb, desc)
+}
+
+func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
+ x := &XXX_InternalExtensions{
+ p: new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }),
+ }
+ x.p.extensionMap = m
+ return *x
+}
+
+func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
+ pb := extendable.(extendableProto)
+ return pb.extensionsWrite()
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 000000000..2c30d7095
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,898 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/gogo/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/gogo/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
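+//
+// A minimal reuse sketch, assuming msgs is a []Message (Buffer's Marshal
+// method is defined with the encoder elsewhere in this package; consume
+// is a placeholder):
+//
+//	var b Buffer
+//	for _, m := range msgs {
+//		b.Reset()
+//		if err := b.Marshal(m); err != nil {
+//			return err
+//		}
+//		consume(b.Bytes())
+//	}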
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
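+//
+// For example, with the FOO enum from the package documentation,
+// EnumName(FOO_name, 17) returns "X", and EnumName(FOO_name, 42)
+// falls back to the decimal string "42".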
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
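+//
+// For example, with FOO_value from the package documentation, both the
+// inputs []byte(`"X"`) and []byte(`17`) yield 17.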
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ sindex := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = sindex
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
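+//
+// A short sketch, using the Test message from the package documentation
+// (whose type field declares [default=77]):
+//
+//	t := &Test{}
+//	SetDefaults(t)
+//	// *t.Type == 77; fields without declared defaults remain nil.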
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its defaultMessage,
+ // which records the scalar fields that have proto-declared defaults
+ // and the indices of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
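+// mapKeys returns a sort.Interface over vs, ordering integer keys
+// numerically and all other keys by their fmt representation, so that
+// callers can write sort.Sort(mapKeys(v.MapKeys())).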
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion2 = true
+
+// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 000000000..4b4f7c909
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,42 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+ s, ok := m[value]
+ if !ok {
+ s = strconv.Itoa(int(value))
+ }
+ return json.Marshal(s)
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 000000000..fd982decd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
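+// skipVarint returns buf with the leading varint removed; it assumes buf
+// begins with a complete, well-formed varint.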
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ first := true
+ for _, id := range ids {
+ ext := m[id]
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it
+ // (checked before writing the separator to avoid a stray comma).
+ continue
+ }
+ if !first {
+ b.WriteByte(',')
+ }
+ first = false
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 000000000..fb512e2e1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
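+// New words are carved out of slices pooled on the Buffer, so a single
+// allocation can serve many fields (see the pools declared on Buffer).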
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 000000000..6b5567d47
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of the XXX_InternalExtensions field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
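
The pool trick above amortizes allocations: rather than one heap allocation per
optional field, the Buffer hands out pointers into a pre-allocated block. A
self-contained sketch of the same idea (poolSize and buffer are illustrative
stand-ins, not the package's types):

    package main

    import "fmt"

    const poolSize = 16

    type buffer struct{ uint32s []uint32 }

    // newUint32 returns a pointer to a freshly set value, refilling the
    // block only when it runs out; the same scheme as word32_Set.
    func (o *buffer) newUint32(x uint32) *uint32 {
        if len(o.uint32s) == 0 {
            o.uint32s = make([]uint32, poolSize)
        }
        o.uint32s[0] = x
        p := &o.uint32s[0]
        o.uint32s = o.uint32s[1:]
        return p
    }

    func main() {
        var o buffer
        a, b := o.newUint32(1), o.newUint32(2)
        fmt.Println(*a, *b) // 1 2
    }
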
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
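
All of the accessors in this file reduce to the same arithmetic: base pointer
plus field offset, reinterpreted at the field's type. A standalone sketch of
that pattern, mirroring structPointer_StringVal (msg is a hypothetical type):

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    type msg struct {
        A int32
        B string
    }

    func main() {
        m := &msg{A: 7, B: "hi"}
        f, _ := reflect.TypeOf(*m).FieldByName("B")
        // Base pointer + byte offset, cast to the field's type.
        s := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + f.Offset))
        fmt.Println(*s) // hi
    }
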
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 000000000..ad7c85179
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,107 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ return r.Interface()
+}
+
+func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ if r.Elem().IsNil() {
+ return nil
+ }
+ return r.Elem().Interface()
+}
+
+func copyUintPtr(oldptr, newptr uintptr, size int) {
+ oldbytes := make([]byte, 0)
+ oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
+ oldslice.Data = oldptr
+ oldslice.Len = size
+ oldslice.Cap = size
+ newbytes := make([]byte, 0)
+ newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
+ newslice.Data = newptr
+ newslice.Len = size
+ newslice.Cap = size
+ copy(newbytes, oldbytes)
+}
+
+func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
+ copyUintPtr(uintptr(oldptr), uintptr(newptr), size)
+}
+
+func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
+ size := typ.Elem().Size()
+
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem()
+ newLen := oldHeader.Len + 1
+ newSlice := reflect.MakeSlice(typ, newLen, newLen)
+ reflect.Copy(newSlice, oldSlice)
+ bas := toStructPointer(newSlice)
+ oldHeader.Data = uintptr(bas)
+ oldHeader.Len = newLen
+ oldHeader.Cap = newLen
+
+ return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size)))
+}
+
+func structPointer_FieldPointer(p structPointer, f field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
+ return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
+ return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_Add(p structPointer, size field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size)))
+}
+
+func structPointer_Len(p structPointer, f field) int {
+ return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f))))
+}
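
For orientation, appendStructPointer grows a slice through its raw
reflect.SliceHeader and returns a pointer to the new last element. The safe
reflect equivalent of that growth step looks like this (elem is illustrative):

    package main

    import (
        "fmt"
        "reflect"
    )

    type elem struct{ N int }

    func main() {
        s := []elem{{1}, {2}}
        v := reflect.ValueOf(&s).Elem()
        // Append a zero element and write through to it, all via reflection.
        v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
        v.Index(v.Len() - 1).Field(0).SetInt(3)
        fmt.Println(s) // [{1} {2} {3}]
    }
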
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 000000000..3e4cad038
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,940 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
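
A toy rendition of the same two-tier lookup, to make the fast/slow split
concrete (toyTagMap is illustrative, not the package's type):

    package main

    import "fmt"

    type toyTagMap struct {
        fast []int
        slow map[int]int
    }

    func (p *toyTagMap) put(t, fi int) {
        if t > 0 && t < 1024 { // same limit as tagMapFastLimit
            for len(p.fast) < t+1 {
                p.fast = append(p.fast, -1)
            }
            p.fast[t] = fi
            return
        }
        if p.slow == nil {
            p.slow = map[int]int{}
        }
        p.slow[t] = fi
    }

    func main() {
        var tm toyTagMap
        tm.put(3, 0)    // small tag: indexed slice, no hashing
        tm.put(5000, 1) // large tag: falls back to the map
        fmt.Println(tm.fast[3], tm.slow[5000]) // 0 1
    }
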
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ CustomType string
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sstype reflect.Type // set for slices of structs types only
+ ctype reflect.Type // set for custom types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+	s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ case strings.HasPrefix(f, "embedded="):
+ p.OrigName = strings.Split(f, "=")[1]
+ case strings.HasPrefix(f, "customtype="):
+ p.CustomType = strings.Split(f, "=")[1]
+ }
+ }
+}
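
Parse and String are exported, so the tag round trip can be observed directly;
a small usage example:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    func main() {
        var p proto.Properties
        p.Parse("bytes,49,opt,name=foo,def=hello!")
        fmt.Println(p.Wire, p.Tag, p.Optional) // bytes 49 true
        fmt.Println(p.OrigName, p.Default)     // foo hello!
    }
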
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+ if len(p.CustomType) > 0 {
+ p.setCustomEncAndDec(typ)
+ p.setTag(lockGetProp)
+ return
+ }
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ } else {
+ p.enc = (*Buffer).enc_ref_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_ref_bool
+ }
+ case reflect.Int32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ } else {
+ p.enc = (*Buffer).enc_ref_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_int32
+ }
+ case reflect.Uint32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_ref_uint32
+ }
+ case reflect.Int64, reflect.Uint64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.Float32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_uint32
+ }
+ case reflect.Float64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.String:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+ } else {
+ p.enc = (*Buffer).enc_ref_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_ref_string
+ }
+ case reflect.Struct:
+ p.stype = typ
+ p.isMarshaler = isMarshaler(typ)
+ p.isUnmarshaler = isUnmarshaler(typ)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_ref_struct_message
+ p.dec = (*Buffer).dec_ref_struct_message
+ p.size = size_ref_struct_message
+ } else {
+ fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ)
+ }
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ case reflect.Struct:
+ p.setSliceOfNonPointerStructs(t1)
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+ p.setTag(lockGetProp)
+}
+
+func (p *Properties) setTag(lockGetProp bool) {
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+	for ; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
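
Worked example of the precomputation above: tag 49 with wire type 2 (bytes)
gives x = 49<<3|2 = 394, whose varint encoding is the two-byte tagcode
0x8a 0x03. The same loop, standalone:

    package main

    import "fmt"

    func main() {
        x := uint32(49)<<3 | 2 // 394
        var tagcode []byte
        for x > 127 {
            tagcode = append(tagcode, 0x80|uint8(x&0x7F))
            x >>= 7
        }
        tagcode = append(tagcode, uint8(x))
        fmt.Printf("% x\n", tagcode) // 8a 03
    }
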
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type) ||
+ reflect.PtrTo(t).Implements(extendableBytesType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ isOneofMessage := false
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ if len(f.Tag.Get("protobuf")) > 0 {
+ p.enc = (*Buffer).enc_ext_slice_byte
+ p.dec = nil // not needed
+ p.size = size_ext_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ isOneofMessage = true
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+ if _, ok := enumStringMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumStringMaps[typeName] = unusedNameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
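
GetProperties is part of the exported surface, so the parsed metadata can be
inspected on any tagged struct; an illustrative example (T is a hand-written
stand-in for a generated message):

    package main

    import (
        "fmt"
        "reflect"

        "github.com/gogo/protobuf/proto"
    )

    type T struct {
        Name *string `protobuf:"bytes,1,opt,name=name"`
    }

    func main() {
        sp := proto.GetProperties(reflect.TypeOf(T{}))
        fmt.Println(sp.Prop[0].OrigName, sp.Prop[0].Tag, sp.Prop[0].Wire) // name 1 bytes
    }
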
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 000000000..4607a9754
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,66 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+)
+
+func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
+ p.ctype = typ
+ if p.Repeated {
+ p.enc = (*Buffer).enc_custom_slice_bytes
+ p.dec = (*Buffer).dec_custom_slice_bytes
+ p.size = size_custom_slice_bytes
+ } else if typ.Kind() == reflect.Ptr {
+ p.enc = (*Buffer).enc_custom_bytes
+ p.dec = (*Buffer).dec_custom_bytes
+ p.size = size_custom_bytes
+ } else {
+ p.enc = (*Buffer).enc_custom_ref_bytes
+ p.dec = (*Buffer).dec_custom_ref_bytes
+ p.size = size_custom_ref_bytes
+ }
+}
+
+func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
+ t2 := typ.Elem()
+ p.sstype = typ
+ p.stype = t2
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ p.enc = (*Buffer).enc_slice_ref_struct_message
+ p.dec = (*Buffer).dec_slice_ref_struct_message
+ p.size = size_slice_ref_struct_message
+ if p.Wire != "bytes" {
+		fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T\n", typ, t2)
+ }
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 000000000..5a5fd93f7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,119 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "io"
+)
+
+func Skip(data []byte) (n int, err error) {
+ l := len(data)
+ index := 0
+ for index < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ index++
+ if data[index-1] < 0x80 {
+ break
+ }
+ }
+ return index, nil
+ case 1:
+ index += 8
+ return index, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ index += length
+ return index, nil
+ case 3:
+ for {
+ var innerWire uint64
+				start := index
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := Skip(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ index = start + next
+ }
+ return index, nil
+ case 4:
+ return index, nil
+ case 5:
+ index += 4
+ return index, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
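
Skip is exported; given the encoding of field 1 as varint 300 (tag byte 0x08,
then 0xac 0x02), it reports that the field occupies three bytes:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    func main() {
        data := []byte{0x08, 0xac, 0x02}
        n, err := proto.Skip(data)
        fmt.Println(n, err) // 3 <nil>
    }
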
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 000000000..b3e12e268
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,815 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, v, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, fv, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv
+ if pv.CanAddr() {
+ pv = sv.Addr()
+ } else {
+ pv = reflect.New(sv.Type())
+ pv.Elem().Set(sv)
+ }
+ if pv.Type().Implements(extensionRangeType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ if props != nil && len(props.CustomType) > 0 {
+ custom, ok := v.Interface().(Marshaler)
+ if ok {
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if err := writeString(w, string(data)); err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
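+
+// escapeSketch is an illustrative sketch (not part of the upstream API). It
+// exercises the escaping rules above: printable bytes pass through, common
+// control characters get C-style escapes, and everything else becomes a
+// three-digit octal escape, so byte 0x01 renders as \001.
+func escapeSketch() (string, error) {
+	var buf bytes.Buffer
+	bw := bufio.NewWriter(&buf)
+	w := &textWriter{w: bw, complete: true}
+	if err := writeString(w, "a\"b\x01"); err != nil { // expect `"a\"b\001"`
+		return "", err
+	}
+	if err := bw.Flush(); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}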
+
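+// writeUnknownStruct renders undecoded wire-format bytes as best-effort text:
+// one "tag: value" line per field, with group nesting indented and decode
+// failures emitted as /* ... */ comments.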
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+ return ferr
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, werr := w.Write(endBraceNewline); werr != nil {
+ return werr
+ }
+ continue
+ }
+ if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+ return ferr
+ }
+ if wire != WireStartGroup {
+ if err = w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err = w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
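+// int32Slice implements sort.Interface so extension field numbers can be
+// sorted into canonical ascending order.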
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ e := pv.Interface().(Message)
+
+ var m map[int32]Extension
+ var mu sync.Locker
+ if em, ok := e.(extensionsBytes); ok {
+ eb := em.GetExtensions()
+ var err error
+ m, err = BytesToExtensionsMap(*eb)
+ if err != nil {
+ return err
+ }
+ mu = notLocker{}
+ } else if _, ok := e.(extendableProto); ok {
+ ep, _ := extendable(e)
+ m, mu = ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ }
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(e, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
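+// writeIndent emits the current indentation, but only at the start of a line
+// (i.e. when the previous write completed one); it then marks the line as
+// started so subsequent writes are not re-indented.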
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: m.Compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (m *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ m.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
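+
+// textFormsSketch is an illustrative sketch (not part of the upstream API)
+// contrasting the two exported renderings: MarshalTextString yields the
+// indented multi-line form, while CompactTextString emits the same fields on
+// one line. Any generated type implementing Message may be passed in.
+func textFormsSketch(m Message) (pretty, compact string) {
+	return MarshalTextString(m), CompactTextString(m)
+}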
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 000000000..589267419
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,57 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+ m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		// No string form registered for this enum; fall back to the numeric
+		// representation and return, instead of falling through and printing
+		// the value a second time.
+		return writeAny(w, v, props)
+	}
+ key := int32(0)
+ if v.Kind() == reflect.Ptr {
+ key = int32(v.Elem().Int())
+ } else {
+ key = int32(v.Int())
+ }
+ s, ok := m[key]
+	if !ok {
+		// The value has no name in the map (e.g. an unrecognized enum
+		// number); fall back to numeric output rather than printing an
+		// empty string.
+		return writeAny(w, v, props)
+	}
+ _, err := fmt.Fprint(w, s)
+ return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 000000000..bcd732c3c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,858 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
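+// ParseError reports a failure to parse text-format input, recording the
+// position at which it occurred.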
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
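+
+// unquoteSketch is an illustrative sketch (not part of the upstream API):
+// unquoteC decodes the body of a quoted token, so hex, octal, and C-style
+// escapes all collapse to the same bytes.
+func unquoteSketch() (string, error) {
+	// `A\x42\103\n` decodes to "ABC\n": \x42 is hex, \103 is octal, \n is C-style.
+	return unquoteC(`A\x42\103\n`, '"')
+}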
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ ntok := p.next()
+ if ntok.err != nil {
+ return ntok.err
+ }
+ if ntok.value == "]" {
+ break
+ }
+ if ntok.value != "," {
+					return p.errorf("expected ']' or ',', found %q", ntok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
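+
+// roundTripSketch is an illustrative sketch (not part of the upstream API):
+// text produced by the marshaler parses back via UnmarshalText. The caller
+// supplies any generated message types implementing Message.
+func roundTripSketch(in, out Message) error {
+	return UnmarshalText(MarshalTextString(in), out) // render in, then re-parse into out
+}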
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
new file mode 100644
index 000000000..acaee1f49
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
@@ -0,0 +1,150 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
+// change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path. The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
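+// A minimal plugin sketch in Go (illustrative only; it assumes a protobuf
+// runtime package `proto` and the generated package for this file, here
+// called `plugin_go`, are importable; error handling is elided):
+//
+//   func main() {
+//       data, _ := ioutil.ReadAll(os.Stdin)        // encoded CodeGeneratorRequest
+//       req := &plugin_go.CodeGeneratorRequest{}
+//       _ = proto.Unmarshal(data, req)
+//       resp := &plugin_go.CodeGeneratorResponse{} // populate resp.File with outputs
+//       out, _ := proto.Marshal(resp)
+//       os.Stdout.Write(out)                       // protoc reads the response
+//   }
+//
+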
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+ // The .proto files that were explicitly listed on the command-line. The
+ // code generator should generate code only for these files. Each file's
+ // descriptor will be included in proto_file, below.
+ repeated string file_to_generate = 1;
+
+ // The generator parameter passed on the command-line.
+ optional string parameter = 2;
+
+  // FileDescriptorProtos for all files in file_to_generate and everything
+ // they import. The files will appear in topological order, so each file
+ // appears before any file that imports it.
+ //
+ // protoc guarantees that all proto_files will be written after
+ // the fields above, even though this is not technically guaranteed by the
+ // protobuf wire format. This theoretically could allow a plugin to stream
+ // in the FileDescriptorProtos and handle them one by one rather than read
+ // the entire set into memory at once. However, as of this writing, this
+ // is not similarly optimized on protoc's end -- it will store all fields in
+ // memory at once before sending them to the plugin.
+ repeated FileDescriptorProto proto_file = 15;
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+ // Error message. If non-empty, code generation failed. The plugin process
+ // should exit with status code zero even if it reports an error in this way.
+ //
+ // This should be used to indicate errors in .proto files which prevent the
+ // code generator from generating correct code. Errors which indicate a
+ // problem in protoc itself -- such as the input CodeGeneratorRequest being
+ // unparseable -- should be reported by writing a message to stderr and
+ // exiting with a non-zero status code.
+ optional string error = 1;
+
+ // Represents a single generated file.
+ message File {
+ // The file name, relative to the output directory. The name must not
+    // contain "." or ".." components and must be relative, not absolute (so,
+ // the file cannot lie outside the output directory). "/" must be used as
+ // the path separator, not "\".
+ //
+ // If the name is omitted, the content will be appended to the previous
+ // file. This allows the generator to break large files into small chunks,
+ // and allows the generated text to be streamed back to protoc so that large
+ // files need not reside completely in memory at one time. Note that as of
+ // this writing protoc does not optimize for this -- it will read the entire
+ // CodeGeneratorResponse before writing files to disk.
+ optional string name = 1;
+
+ // If non-empty, indicates that the named file should already exist, and the
+ // content here is to be inserted into that file at a defined insertion
+ // point. This feature allows a code generator to extend the output
+ // produced by another code generator. The original generator may provide
+ // insertion points by placing special annotations in the file that look
+ // like:
+ // @@protoc_insertion_point(NAME)
+ // The annotation can have arbitrary text before and after it on the line,
+ // which allows it to be placed in a comment. NAME should be replaced with
+ // an identifier naming the point -- this is what other generators will use
+ // as the insertion_point. Code inserted at this point will be placed
+ // immediately above the line containing the insertion point (thus multiple
+ // insertions to the same point will come out in the order they were added).
+ // The double-@ is intended to make it unlikely that the generated code
+ // could contain things that look like insertion points by accident.
+ //
+ // For example, the C++ code generator places the following line in the
+ // .pb.h files that it generates:
+ // // @@protoc_insertion_point(namespace_scope)
+ // This line appears within the scope of the file's package namespace, but
+ // outside of any particular class. Another plugin can then specify the
+ // insertion_point "namespace_scope" to generate additional classes or
+ // other declarations that should be placed in this scope.
+ //
+ // Note that if the line containing the insertion point begins with
+ // whitespace, the same whitespace will be added to every line of the
+ // inserted text. This is useful for languages like Python, where
+ // indentation matters. In these languages, the insertion point comment
+ // should be indented the same amount as any inserted code will need to be
+ // in order to work correctly in that context.
+ //
+ // The code generator that generates the initial file and the one which
+ // inserts into it must both run as part of a single invocation of protoc.
+ // Code generators are executed in the order in which they appear on the
+ // command line.
+ //
+ // If |insertion_point| is present, |name| must also be present.
+ optional string insertion_point = 2;
+
+ // The file contents.
+ optional string content = 15;
+ }
+ repeated File file = 15;
+}
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
new file mode 100644
index 000000000..c59a6022e
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
@@ -0,0 +1,779 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+ repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+ optional string name = 1; // file name, relative to root of source tree
+ optional string package = 2; // e.g. "foo", "foo.bar", etc.
+
+ // Names of files imported by this file.
+ repeated string dependency = 3;
+ // Indexes of the public imported files in the dependency list above.
+ repeated int32 public_dependency = 10;
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ repeated int32 weak_dependency = 11;
+
+ // All top-level definitions in this file.
+ repeated DescriptorProto message_type = 4;
+ repeated EnumDescriptorProto enum_type = 5;
+ repeated ServiceDescriptorProto service = 6;
+ repeated FieldDescriptorProto extension = 7;
+
+ optional FileOptions options = 8;
+
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ optional SourceCodeInfo source_code_info = 9;
+
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+ optional string name = 1;
+
+ repeated FieldDescriptorProto field = 2;
+ repeated FieldDescriptorProto extension = 6;
+
+ repeated DescriptorProto nested_type = 3;
+ repeated EnumDescriptorProto enum_type = 4;
+
+ message ExtensionRange {
+ optional int32 start = 1;
+ optional int32 end = 2;
+ }
+ repeated ExtensionRange extension_range = 5;
+
+ repeated OneofDescriptorProto oneof_decl = 8;
+
+ optional MessageOptions options = 7;
+
+ // Range of reserved tag numbers. Reserved tag numbers may not be used by
+ // fields or extension ranges in the same message. Reserved ranges may
+ // not overlap.
+ message ReservedRange {
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
+ }
+ repeated ReservedRange reserved_range = 9;
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ repeated string reserved_name = 10;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+ enum Type {
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ TYPE_DOUBLE = 1;
+ TYPE_FLOAT = 2;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ TYPE_INT64 = 3;
+ TYPE_UINT64 = 4;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ TYPE_INT32 = 5;
+ TYPE_FIXED64 = 6;
+ TYPE_FIXED32 = 7;
+ TYPE_BOOL = 8;
+ TYPE_STRING = 9;
+ TYPE_GROUP = 10; // Tag-delimited aggregate.
+ TYPE_MESSAGE = 11; // Length-delimited aggregate.
+
+ // New in version 2.
+ TYPE_BYTES = 12;
+ TYPE_UINT32 = 13;
+ TYPE_ENUM = 14;
+ TYPE_SFIXED32 = 15;
+ TYPE_SFIXED64 = 16;
+ TYPE_SINT32 = 17; // Uses ZigZag encoding.
+ TYPE_SINT64 = 18; // Uses ZigZag encoding.
+ };
+
+ enum Label {
+ // 0 is reserved for errors
+ LABEL_OPTIONAL = 1;
+ LABEL_REQUIRED = 2;
+ LABEL_REPEATED = 3;
+ // TODO(sanjay): Should we add LABEL_MAP?
+ };
+
+ optional string name = 1;
+ optional int32 number = 3;
+ optional Label label = 4;
+
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ optional Type type = 5;
+
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ optional string type_name = 6;
+
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ optional string extendee = 2;
+
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ optional string default_value = 7;
+
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ optional int32 oneof_index = 9;
+
+ // JSON name of this field. The value is set by protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ optional string json_name = 10;
+
+ optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+ optional string name = 1;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+ optional string name = 1;
+
+ repeated EnumValueDescriptorProto value = 2;
+
+ optional EnumOptions options = 3;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+ optional string name = 1;
+ optional int32 number = 2;
+
+ optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+ optional string name = 1;
+ repeated MethodDescriptorProto method = 2;
+
+ optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+ optional string name = 1;
+
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ optional string input_type = 2;
+ optional string output_type = 3;
+
+ optional MethodOptions options = 4;
+
+ // Identifies if client streams multiple client messages
+ optional bool client_streaming = 5 [default=false];
+ // Identifies if server streams multiple server messages
+ optional bool server_streaming = 6 [default=false];
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached. These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them. Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+// organization, or for experimental options, use field numbers 50000
+// through 99999. It is up to you to ensure that you do not use the
+// same number for multiple options.
+// * For options which will be published and used publicly by multiple
+// independent entities, e-mail protobuf-global-extension-registry@google.com
+// to reserve extension numbers. Simply provide your project name (e.g.
+// Objective-C plugin) and your project website (if available) -- there's no
+// need to explain how you intend to use them. Usually you only need one
+// extension number. You can declare multiple options with only one extension
+// number by putting them in a sub-message. See the Custom Options section of
+// the docs for examples:
+// https://developers.google.com/protocol-buffers/docs/proto#options
+// If this turns out to be popular, a web service will be set up
+// to automatically assign option numbers.
+
+
+message FileOptions {
+
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ optional string java_package = 1;
+
+
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ optional string java_outer_classname = 8;
+
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ optional bool java_multiple_files = 10 [default=false];
+
+ // If set true, then the Java code generator will generate equals() and
+ // hashCode() methods for all messages defined in the .proto file.
+ // This increases generated code size, potentially substantially for large
+ // protos, which may harm a memory-constrained application.
+ // - In the full runtime this is a speed optimization, as the
+ // AbstractMessage base class includes reflection-based implementations of
+ // these methods.
+ // - In the lite runtime, setting this option changes the semantics of
+ // equals() and hashCode() to more closely match those of the full runtime;
+ // the generated methods compute their results based on field values rather
+ // than object identity. (Implementations should not assume that hashcodes
+ // will be consistent across runtimes or versions of the protocol compiler.)
+ optional bool java_generate_equals_and_hash = 20 [default=false];
+
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+ optional bool java_string_check_utf8 = 27 [default=false];
+
+
+ // Generated classes can be optimized for speed or code size.
+ enum OptimizeMode {
+ SPEED = 1; // Generate complete code for parsing, serialization,
+ // etc.
+ CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
+ LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+ }
+ optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ optional string go_package = 11;
+
+
+
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ optional bool cc_generic_services = 16 [default=false];
+ optional bool java_generic_services = 17 [default=false];
+ optional bool py_generic_services = 18 [default=false];
+
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; at the very
+  // least, this is a formalization for deprecating files.
+ optional bool deprecated = 23 [default=false];
+
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ optional bool cc_enable_arenas = 31 [default=false];
+
+
+ // Sets the objective c class prefix which is prepended to all objective c
+ // generated classes from this .proto. There is no default.
+ optional string objc_class_prefix = 36;
+
+ // Namespace for generated classes; defaults to the package.
+ optional string csharp_namespace = 37;
+
+ // Whether the nano proto compiler should generate in the deprecated non-nano
+ // suffixed package.
+ optional bool javanano_use_deprecated_package = 38;
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message MessageOptions {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ optional bool message_set_wire_format = 1 [default=false];
+
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ optional bool deprecated = 3 [default=false];
+
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ optional bool map_entry = 7;
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
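
To make the map_entry expansion above concrete, here is a hedged Go sketch that hand-builds the entry descriptor the compiler would synthesize; the names and field numbers are illustrative:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// The entry message synthesized for: map<KeyType, ValueType> map_field = 1;
	entry := &descriptor.DescriptorProto{
		Name: proto.String("MapFieldEntry"),
		Options: &descriptor.MessageOptions{
			MapEntry: proto.Bool(true), // set by the parser, never by hand
		},
		Field: []*descriptor.FieldDescriptorProto{
			{Name: proto.String("key"), Number: proto.Int32(1)},
			{Name: proto.String("value"), Number: proto.Int32(2)},
		},
	}
	fmt.Println(entry.GetOptions().GetMapEntry()) // true
}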
+
+message FieldOptions {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ optional CType ctype = 1 [default = STRING];
+ enum CType {
+ // Default mode.
+ STRING = 0;
+
+ CORD = 1;
+
+ STRING_PIECE = 2;
+ }
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ optional bool packed = 2;
+
+
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). By default these types are
+ // represented as JavaScript strings. This avoids loss of precision that can
+ // happen when a large value is converted to a floating point JavaScript
+ // number. Specifying JS_NUMBER for the jstype causes the generated
+ // JavaScript code to use the JavaScript "number" type instead of strings.
+ // This option is an enum to permit additional types to be added,
+ // e.g. goog.math.Integer.
+ optional JSType jstype = 6 [default = JS_NORMAL];
+ enum JSType {
+ // Use the default type.
+ JS_NORMAL = 0;
+
+ // Use JavaScript strings.
+ JS_STRING = 1;
+
+ // Use JavaScript numbers.
+ JS_NUMBER = 2;
+ }
+
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ optional bool lazy = 5 [default=false];
+
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ optional bool deprecated = 3 [default=false];
+
+ // For Google-internal migration only. Do not use.
+ optional bool weak = 10 [default=false];
+
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
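
A short illustrative Go sketch, under the same assumptions as above, attaching the packed, lazy, and jstype options just described to a field:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	opts := &descriptor.FieldOptions{
		Packed: proto.Bool(true), // length-delimited encoding for repeated scalars
		Lazy:   proto.Bool(true), // a hint only; parsers may ignore it
		Jstype: descriptor.FieldOptions_JS_STRING.Enum(),
	}
	fmt.Println(opts.GetPacked(), opts.GetLazy(), opts.GetJstype())
}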
+
+message EnumOptions {
+
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ optional bool allow_alias = 2;
+
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ optional bool deprecated = 3 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
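
A small illustrative sketch of allow_alias: two enum value names sharing one number (all names here are hypothetical):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	e := &descriptor.EnumDescriptorProto{
		Name:    proto.String("Status"),
		Options: &descriptor.EnumOptions{AllowAlias: proto.Bool(true)},
		Value: []*descriptor.EnumValueDescriptorProto{
			{Name: proto.String("STARTED"), Number: proto.Int32(1)},
			{Name: proto.String("RUNNING"), Number: proto.Int32(1)}, // alias
		},
	}
	fmt.Println(e.GetOptions().GetAllowAlias()) // true
}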
+
+message EnumValueOptions {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ optional bool deprecated = 1 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers for ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ optional bool deprecated = 33 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message MethodOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers for ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ optional bool deprecated = 33 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+ // The name of the uninterpreted option. Each string represents a segment in
+ // a dot-separated name. is_extension is true iff a segment represents an
+ // extension (denoted with parentheses in options specs in .proto files).
+ // E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+ // "foo.(bar.baz).qux".
+ message NamePart {
+ required string name_part = 1;
+ required bool is_extension = 2;
+ }
+ repeated NamePart name = 2;
+
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ optional string identifier_value = 3;
+ optional uint64 positive_int_value = 4;
+ optional int64 negative_int_value = 5;
+ optional double double_value = 6;
+ optional bytes string_value = 7;
+ optional string aggregate_value = 8;
+}
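
A sketch reconstructing the dotted name from NameParts, following the "foo.(bar.baz).qux" example in the comment above; the formatting loop is illustrative, not a library API:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	opt := &descriptor.UninterpretedOption{
		Name: []*descriptor.UninterpretedOption_NamePart{
			{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
			{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
			{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
		},
	}
	s := ""
	for i, p := range opt.GetName() {
		if i > 0 {
			s += "."
		}
		if p.GetIsExtension() {
			s += "(" + p.GetNamePart() + ")" // extension segments are parenthesized
		} else {
			s += p.GetNamePart()
		}
	}
	fmt.Println(s) // foo.(bar.baz).qux
}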
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements is
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ repeated Location location = 1;
+ message Location {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ repeated int32 path = 1 [packed=true];
+
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ repeated int32 span = 2 [packed=true];
+
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. These are not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ optional string leading_comments = 3;
+ optional string trailing_comments = 4;
+ repeated string leading_detached_comments = 6;
+ }
+}
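
A hand-built Location for a field like the "optional string foo = 1;" example above; the path and span values here are illustrative, not taken from a real compile:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Whole-field location: path [4,0,2,0] means message_type(0).field(0);
	// a three-element span is start line, start column, end column, zero-based.
	loc := &descriptor.SourceCodeInfo_Location{
		Path: []int32{4, 0, 2, 0},
		Span: []int32{1, 2, 26},
	}
	fmt.Printf("line %d, cols %d-%d\n",
		loc.Span[0]+1, loc.Span[1]+1, loc.Span[2]+1) // add 1 for display
}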
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
new file mode 100644
index 000000000..341b59c53
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -0,0 +1,1981 @@
+// Code generated by protoc-gen-gogo.
+// source: descriptor.proto
+// DO NOT EDIT!
+
+/*
+Package descriptor is a generated protocol buffer package.
+
+It is generated from these files:
+ descriptor.proto
+
+It has these top-level messages:
+ FileDescriptorSet
+ FileDescriptorProto
+ DescriptorProto
+ FieldDescriptorProto
+ OneofDescriptorProto
+ EnumDescriptorProto
+ EnumValueDescriptorProto
+ ServiceDescriptorProto
+ MethodDescriptorProto
+ FileOptions
+ MessageOptions
+ FieldOptions
+ EnumOptions
+ EnumValueOptions
+ ServiceOptions
+ MethodOptions
+ UninterpretedOption
+ SourceCodeInfo
+*/
+package descriptor
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+ FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
+ FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
+ FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+ FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+ FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
+ FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
+ FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
+ FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+ // New in version 2.
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12
+ FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13
+ FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14
+ FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+ FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+ FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17
+ FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+ 1: "TYPE_DOUBLE",
+ 2: "TYPE_FLOAT",
+ 3: "TYPE_INT64",
+ 4: "TYPE_UINT64",
+ 5: "TYPE_INT32",
+ 6: "TYPE_FIXED64",
+ 7: "TYPE_FIXED32",
+ 8: "TYPE_BOOL",
+ 9: "TYPE_STRING",
+ 10: "TYPE_GROUP",
+ 11: "TYPE_MESSAGE",
+ 12: "TYPE_BYTES",
+ 13: "TYPE_UINT32",
+ 14: "TYPE_ENUM",
+ 15: "TYPE_SFIXED32",
+ 16: "TYPE_SFIXED64",
+ 17: "TYPE_SINT32",
+ 18: "TYPE_SINT64",
+}
+var FieldDescriptorProto_Type_value = map[string]int32{
+ "TYPE_DOUBLE": 1,
+ "TYPE_FLOAT": 2,
+ "TYPE_INT64": 3,
+ "TYPE_UINT64": 4,
+ "TYPE_INT32": 5,
+ "TYPE_FIXED64": 6,
+ "TYPE_FIXED32": 7,
+ "TYPE_BOOL": 8,
+ "TYPE_STRING": 9,
+ "TYPE_GROUP": 10,
+ "TYPE_MESSAGE": 11,
+ "TYPE_BYTES": 12,
+ "TYPE_UINT32": 13,
+ "TYPE_ENUM": 14,
+ "TYPE_SFIXED32": 15,
+ "TYPE_SFIXED64": 16,
+ "TYPE_SINT32": 17,
+ "TYPE_SINT64": 18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+ p := new(FieldDescriptorProto_Type)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Type) String() string {
+ return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Type(value)
+ return nil
+}
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{3, 0}
+}
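
Typical use of the generated enum helpers just defined; the chosen values are arbitrary and the snippet is a sketch, not vendored code:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	t := descriptor.FieldDescriptorProto_TYPE_STRING
	fmt.Println(t.String()) // TYPE_STRING, via the name map

	// Enum() returns a pointer, as proto2 optional fields require.
	tp := t.Enum()

	// UnmarshalJSON accepts the symbolic name.
	var u descriptor.FieldDescriptorProto_Type
	if err := u.UnmarshalJSON([]byte(`"TYPE_INT32"`)); err == nil {
		fmt.Println(*tp, u) // TYPE_STRING TYPE_INT32
	}
}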
+
+type FieldDescriptorProto_Label int32
+
+const (
+ // 0 is reserved for errors
+ FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+ FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+ FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+ 1: "LABEL_OPTIONAL",
+ 2: "LABEL_REQUIRED",
+ 3: "LABEL_REPEATED",
+}
+var FieldDescriptorProto_Label_value = map[string]int32{
+ "LABEL_OPTIONAL": 1,
+ "LABEL_REQUIRED": 2,
+ "LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+ p := new(FieldDescriptorProto_Label)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Label) String() string {
+ return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Label(value)
+ return nil
+}
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{3, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+ FileOptions_SPEED FileOptions_OptimizeMode = 1
+ // etc.
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2
+ FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+ 1: "SPEED",
+ 2: "CODE_SIZE",
+ 3: "LITE_RUNTIME",
+}
+var FileOptions_OptimizeMode_value = map[string]int32{
+ "SPEED": 1,
+ "CODE_SIZE": 2,
+ "LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+ p := new(FileOptions_OptimizeMode)
+ *p = x
+ return p
+}
+func (x FileOptions_OptimizeMode) String() string {
+ return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+ if err != nil {
+ return err
+ }
+ *x = FileOptions_OptimizeMode(value)
+ return nil
+}
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{9, 0}
+}
+
+type FieldOptions_CType int32
+
+const (
+ // Default mode.
+ FieldOptions_STRING FieldOptions_CType = 0
+ FieldOptions_CORD FieldOptions_CType = 1
+ FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+ 0: "STRING",
+ 1: "CORD",
+ 2: "STRING_PIECE",
+}
+var FieldOptions_CType_value = map[string]int32{
+ "STRING": 0,
+ "CORD": 1,
+ "STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+ p := new(FieldOptions_CType)
+ *p = x
+ return p
+}
+func (x FieldOptions_CType) String() string {
+ return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_CType(value)
+ return nil
+}
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{11, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+ // Use the default type.
+ FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+ // Use JavaScript strings.
+ FieldOptions_JS_STRING FieldOptions_JSType = 1
+ // Use JavaScript numbers.
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+ 0: "JS_NORMAL",
+ 1: "JS_STRING",
+ 2: "JS_NUMBER",
+}
+var FieldOptions_JSType_value = map[string]int32{
+ "JS_NORMAL": 0,
+ "JS_STRING": 1,
+ "JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+ p := new(FieldOptions_JSType)
+ *p = x
+ return p
+}
+func (x FieldOptions_JSType) String() string {
+ return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_JSType(value)
+ return nil
+}
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{11, 1}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+ File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage() {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+ if m != nil {
+ return m.File
+ }
+ return nil
+}
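
The m != nil guard above is what makes the generated getters safe on nil receivers; a tiny illustrative sketch:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Chained access needs no intermediate nil checks.
	var fds *descriptor.FileDescriptorSet
	fmt.Println(len(fds.GetFile())) // 0, no panic
}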
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+ // Names of files imported by this file.
+ Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+ // Indexes of the public imported files in the dependency list above.
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // All top-level definitions in this file.
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+ Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage() {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} }
+
+func (m *FileDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+ if m != nil && m.Package != nil {
+ return *m.Package
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+ if m != nil {
+ return m.Dependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+ if m != nil {
+ return m.PublicDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+ if m != nil {
+ return m.WeakDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+ if m != nil {
+ return m.MessageType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+ if m != nil {
+ return m.Service
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+ if m != nil {
+ return m.SourceCodeInfo
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+ if m != nil && m.Syntax != nil {
+ return *m.Syntax
+ }
+ return ""
+}
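
A minimal illustrative sketch assembling a FileDescriptorProto by hand with the getters above; the file and package names are hypothetical:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	fd := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
	}
	fmt.Println(fd.GetName(), fd.GetPackage(), fd.GetSyntax())
}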
+
+// Describes a message type.
+type DescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+ NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+ OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+ Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+ ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage() {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} }
+
+func (m *DescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+ if m != nil {
+ return m.NestedType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+ if m != nil {
+ return m.ExtensionRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+ if m != nil {
+ return m.OneofDecl
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+ if m != nil {
+ return m.ReservedRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+ if m != nil {
+ return m.ReservedName
+ }
+ return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage() {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+ Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+ // JSON name of this field. The value is set by protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+ Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage() {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} }
+
+func (m *FieldDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+ if m != nil && m.TypeName != nil {
+ return *m.TypeName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+ if m != nil && m.Extendee != nil {
+ return *m.Extendee
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+ if m != nil && m.DefaultValue != nil {
+ return *m.DefaultValue
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+ if m != nil && m.OneofIndex != nil {
+ return *m.OneofIndex
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+ if m != nil && m.JsonName != nil {
+ return *m.JsonName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
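
A sketch describing "optional string foo = 1;" with the types above; the values are illustrative:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	f := &descriptor.FieldDescriptorProto{
		Name:   proto.String("foo"),
		Number: proto.Int32(1),
		Label:  descriptor.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
		Type:   descriptor.FieldDescriptorProto_TYPE_STRING.Enum(),
	}
	fmt.Println(f.GetName(), f.GetLabel(), f.GetType())
}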
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage() {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} }
+
+func (m *OneofDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage() {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} }
+
+func (m *EnumDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+ Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage() {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{6}
+}
+
+func (m *EnumValueDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+ Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage() {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{7} }
+
+func (m *ServiceDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+ if m != nil {
+ return m.Method
+ }
+ return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+ OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+ Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+ // Identifies whether the client streams multiple client messages
+ ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+ // Identifies whether the server streams multiple server messages
+ ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage() {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} }
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+ if m != nil && m.InputType != nil {
+ return *m.InputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+ if m != nil && m.OutputType != nil {
+ return *m.OutputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+ if m != nil && m.ClientStreaming != nil {
+ return *m.ClientStreaming
+ }
+ return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+ if m != nil && m.ServerStreaming != nil {
+ return *m.ServerStreaming
+ }
+ return Default_MethodDescriptorProto_ServerStreaming
+}
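
A sketch showing the streaming flags falling back to the Default_ constants above; the method and type names are hypothetical:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	m := &descriptor.MethodDescriptorProto{
		Name:       proto.String("Watch"),
		InputType:  proto.String(".example.Req"),
		OutputType: proto.String(".example.Resp"),
	}
	// Unset flags return the defaults (false).
	fmt.Println(m.GetClientStreaming(), m.GetServerStreaming())

	m.ServerStreaming = proto.Bool(true)
	fmt.Println(m.GetServerStreaming()) // true
}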
+
+type FileOptions struct {
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+ // If set true, then the Java code generator will generate equals() and
+ // hashCode() methods for all messages defined in the .proto file.
+ // This increases generated code size, potentially substantially for large
+ // protos, which may harm a memory-constrained application.
+ // - In the full runtime this is a speed optimization, as the
+ // AbstractMessage base class includes reflection-based implementations of
+ // these methods.
+ // - In the lite runtime, setting this option changes the semantics of
+ // equals() and hashCode() to more closely match those of the full runtime;
+ // the generated methods compute their results based on field values rather
+ // than object identity. (Implementations should not assume that hashcodes
+ // will be consistent across runtimes or versions of the protocol compiler.)
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"`
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+ OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+ JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+ PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; at the very
+ // least, this is a formalization for deprecating files.
+ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+ // Sets the Objective-C class prefix which is prepended to all Objective-C
+ // generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+ // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // Whether the nano proto compiler should generate in the deprecated non-nano
+ // suffixed package.
+ JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileOptions) Reset() { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage() {}
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} }
+
+var extRange_FileOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FileOptions
+}
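
A hedged sketch of the extension machinery these ranges enable: E_MyOpt is a hypothetical custom file option in the advertised 1000-to-max range, wired up by hand rather than generated by protoc:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// Hypothetical extension descriptor; field 50000 lies inside the range
// reported by ExtensionRangeArray above.
var E_MyOpt = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         50000,
	Name:          "example.my_opt",
	Tag:           "bytes,50000,opt,name=my_opt",
}

func main() {
	opts := &descriptor.FileOptions{}
	if err := proto.SetExtension(opts, E_MyOpt, proto.String("hello")); err != nil {
		panic(err)
	}
	v, err := proto.GetExtension(opts, E_MyOpt)
	if err != nil {
		panic(err)
	}
	fmt.Println(*(v.(*string))) // hello
}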
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaGenerateEqualsAndHash bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+ if m != nil && m.JavaPackage != nil {
+ return *m.JavaPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+ if m != nil && m.JavaOuterClassname != nil {
+ return *m.JavaOuterClassname
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+ if m != nil && m.JavaMultipleFiles != nil {
+ return *m.JavaMultipleFiles
+ }
+ return Default_FileOptions_JavaMultipleFiles
+}
+
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+ if m != nil && m.JavaGenerateEqualsAndHash != nil {
+ return *m.JavaGenerateEqualsAndHash
+ }
+ return Default_FileOptions_JavaGenerateEqualsAndHash
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+ if m != nil && m.JavaStringCheckUtf8 != nil {
+ return *m.JavaStringCheckUtf8
+ }
+ return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+ if m != nil && m.OptimizeFor != nil {
+ return *m.OptimizeFor
+ }
+ return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+ if m != nil && m.GoPackage != nil {
+ return *m.GoPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+ if m != nil && m.CcGenericServices != nil {
+ return *m.CcGenericServices
+ }
+ return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+ if m != nil && m.JavaGenericServices != nil {
+ return *m.JavaGenericServices
+ }
+ return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+ if m != nil && m.PyGenericServices != nil {
+ return *m.PyGenericServices
+ }
+ return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+ if m != nil && m.CcEnableArenas != nil {
+ return *m.CcEnableArenas
+ }
+ return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+ if m != nil && m.ObjcClassPrefix != nil {
+ return *m.ObjcClassPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+ if m != nil && m.CsharpNamespace != nil {
+ return *m.CsharpNamespace
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool {
+ if m != nil && m.JavananoUseDeprecatedPackage != nil {
+ return *m.JavananoUseDeprecatedPackage
+ }
+ return false
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type MessageOptions struct {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MessageOptions
+}
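+
+// Editorial note: 536870911 is 2^29-1, the largest field number the protobuf
+// wire format allows, so custom options can extend MessageOptions (and the
+// other *Options messages below) at any field number from 1000 up to that
+// maximum; 19000-19999 is reserved by the protocol itself.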
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+ if m != nil && m.MessageSetWireFormat != nil {
+ return *m.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if m != nil && m.NoStandardDescriptorAccessor != nil {
+ return *m.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+ if m != nil && m.MapEntry != nil {
+ return *m.MapEntry
+ }
+ return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type FieldOptions struct {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it
+	// to false will avoid using packed encoding.
+ Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+ // The jstype option determines the JavaScript type used for values of the
+	// field. The option is permitted only for 64-bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). By default these types are
+ // represented as JavaScript strings. This avoids loss of precision that can
+ // happen when a large value is converted to a floating point JavaScript
+ // numbers. Specifying JS_NUMBER for the jstype causes the generated
+	// number. Specifying JS_NUMBER for the jstype causes the generated
+ // This option is an enum to permit additional types to be added,
+ // e.g. goog.math.Integer.
+ Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+	// a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // For Google-internal migration only. Do not use.
+ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldOptions) Reset() { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage() {}
+func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} }
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FieldOptions
+}
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+ if m != nil && m.Ctype != nil {
+ return *m.Ctype
+ }
+ return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+ if m != nil && m.Packed != nil {
+ return *m.Packed
+ }
+ return false
+}
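+
+// Editorial note (illustrative): in a .proto file the packed option above is
+// written inline on a repeated scalar field, e.g.:
+//
+//	repeated int32 samples = 4 [packed = true];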
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+ if m != nil && m.Jstype != nil {
+ return *m.Jstype
+ }
+ return Default_FieldOptions_Jstype
+}
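+
+// Editorial note (illustrative): jstype is likewise set inline, and only on
+// the 64-bit field types listed above, e.g.:
+//
+//	optional int64 user_id = 1 [jstype = JS_NUMBER];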
+
+func (m *FieldOptions) GetLazy() bool {
+ if m != nil && m.Lazy != nil {
+ return *m.Lazy
+ }
+ return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+ if m != nil && m.Weak != nil {
+ return *m.Weak
+ }
+ return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumOptions struct {
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+	// for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumOptions) Reset() { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage() {}
+func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} }
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumOptions
+}
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+ if m != nil && m.AllowAlias != nil {
+ return *m.AllowAlias
+ }
+ return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumValueOptions struct {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+	// for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage() {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} }
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumValueOptions
+}
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type ServiceOptions struct {
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+	// for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage() {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} }
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ServiceOptions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type MethodOptions struct {
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} }
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MethodOptions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+
+func (m *MethodOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+ PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+ NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+ StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage() {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} }
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+ if m != nil && m.IdentifierValue != nil {
+ return *m.IdentifierValue
+ }
+ return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+ if m != nil && m.PositiveIntValue != nil {
+ return *m.PositiveIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+ if m != nil && m.NegativeIntValue != nil {
+ return *m.NegativeIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+ if m != nil {
+ return m.StringValue
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+ if m != nil && m.AggregateValue != nil {
+ return *m.AggregateValue
+ }
+ return ""
+}
+
+// The name of the uninterpreted option. Each string represents a segment in
+// a dot-separated name. is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+ NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+ IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage() {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{16, 0}
+}
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+ if m != nil && m.NamePart != nil {
+ return *m.NamePart
+ }
+ return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+ if m != nil && m.IsExtension != nil {
+ return *m.IsExtension
+ }
+ return false
+}
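+
+// formatOptionName is an editorial sketch, not part of the generated code: it
+// rebuilds the dotted option name described above, wrapping extension
+// segments in parentheses, so the name parts {["foo", false],
+// ["bar.baz", true], ["qux", false]} come back as "foo.(bar.baz).qux".
+func formatOptionName(parts []*UninterpretedOption_NamePart) string {
+	name := ""
+	for i, p := range parts {
+		if i > 0 {
+			name += "." // segments are dot-separated
+		}
+		if p.GetIsExtension() {
+			// extension segments are parenthesized in option specs
+			name += "(" + p.GetNamePart() + ")"
+		} else {
+			name += p.GetNamePart()
+		}
+	}
+	return name
+}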
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it). This is used whenever a set of elements is
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} }
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+ if m != nil {
+ return m.Location
+ }
+ return nil
+}
+
+type SourceCodeInfo_Location struct {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+	// the root FileDescriptorProto to the place where the definition appears.
+	// For example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+ TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+ LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage() {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+ return fileDescriptorDescriptor, []int{17, 0}
+}
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+ if m != nil {
+ return m.Span
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+ if m != nil && m.LeadingComments != nil {
+ return *m.LeadingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+ if m != nil && m.TrailingComments != nil {
+ return *m.TrailingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+ if m != nil {
+ return m.LeadingDetachedComments
+ }
+ return nil
+}
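+
+// spanLineCol is an editorial sketch, not generated code: Span values are
+// zero-based as noted above, so converting the span start to the 1-based
+// line/column a user expects means adding one to each.
+func spanLineCol(loc *SourceCodeInfo_Location) (line, col int32) {
+	span := loc.GetSpan() // nil-safe even when loc is nil
+	if len(span) < 2 {
+		return 0, 0
+	}
+	return span[0] + 1, span[1] + 1
+}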
+
+func init() {
+ proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+ proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+ proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+ proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+ proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+ proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+ proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+ proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+ proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+ proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+ proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+ proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+ proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+ proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+ proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+ proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+ proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+ proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+ proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+ proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+ proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+ proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+ proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+}
+
+func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) }
+
+var fileDescriptorDescriptor = []byte{
+ // 2211 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xc6,
+ 0x15, 0x0f, 0xf8, 0x4f, 0xe4, 0x23, 0x45, 0xad, 0x56, 0x8a, 0x03, 0xcb, 0x76, 0x2c, 0x33, 0x76,
+ 0x2c, 0xdb, 0xad, 0x9c, 0x91, 0xff, 0x44, 0x51, 0x3a, 0xe9, 0x50, 0x24, 0xac, 0xd0, 0x43, 0x89,
+ 0x2c, 0x28, 0xb6, 0x4e, 0x2e, 0x98, 0x15, 0xb0, 0xa4, 0x60, 0x83, 0x0b, 0x14, 0x00, 0x6d, 0x2b,
+ 0x27, 0xcf, 0xf4, 0xd4, 0x6f, 0xd0, 0x69, 0x3b, 0x3d, 0xe4, 0x92, 0x99, 0x7e, 0x80, 0x1e, 0x7a,
+ 0xef, 0xb5, 0x87, 0x9e, 0x7b, 0xec, 0x4c, 0xfb, 0x0d, 0x7a, 0xed, 0xec, 0x2e, 0x00, 0x82, 0x7f,
+ 0x14, 0xab, 0x99, 0x49, 0xd3, 0x93, 0xb4, 0xbf, 0xf7, 0x7b, 0x8f, 0x6f, 0xdf, 0xfe, 0xb0, 0xef,
+ 0x01, 0x80, 0x2c, 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0xbf, 0xed, 0xf9, 0x6e, 0xe8, 0xe2, 0x95,
+ 0xa1, 0xeb, 0x0e, 0x1d, 0x2a, 0x57, 0x27, 0xe3, 0x41, 0xed, 0x10, 0x56, 0x9f, 0xd8, 0x0e, 0x6d,
+ 0x26, 0xc4, 0x1e, 0x0d, 0xf1, 0x2e, 0xe4, 0x06, 0xb6, 0x43, 0x55, 0x65, 0x33, 0xbb, 0x55, 0xde,
+ 0xb9, 0xb9, 0x3d, 0xe3, 0xb4, 0x3d, 0xed, 0xd1, 0xe5, 0xb0, 0x2e, 0x3c, 0x6a, 0xff, 0xc8, 0xc1,
+ 0xda, 0x02, 0x2b, 0xc6, 0x90, 0x63, 0x64, 0xc4, 0x23, 0x2a, 0x5b, 0x25, 0x5d, 0xfc, 0x8f, 0x55,
+ 0x58, 0xf2, 0x88, 0xf9, 0x82, 0x0c, 0xa9, 0x9a, 0x11, 0x70, 0xbc, 0xc4, 0xef, 0x03, 0x58, 0xd4,
+ 0xa3, 0xcc, 0xa2, 0xcc, 0x3c, 0x53, 0xb3, 0x9b, 0xd9, 0xad, 0x92, 0x9e, 0x42, 0xf0, 0x3d, 0x58,
+ 0xf5, 0xc6, 0x27, 0x8e, 0x6d, 0x1a, 0x29, 0x1a, 0x6c, 0x66, 0xb7, 0xf2, 0x3a, 0x92, 0x86, 0xe6,
+ 0x84, 0x7c, 0x1b, 0x56, 0x5e, 0x51, 0xf2, 0x22, 0x4d, 0x2d, 0x0b, 0x6a, 0x95, 0xc3, 0x29, 0x62,
+ 0x03, 0x2a, 0x23, 0x1a, 0x04, 0x64, 0x48, 0x8d, 0xf0, 0xcc, 0xa3, 0x6a, 0x4e, 0xec, 0x7e, 0x73,
+ 0x6e, 0xf7, 0xb3, 0x3b, 0x2f, 0x47, 0x5e, 0xc7, 0x67, 0x1e, 0xc5, 0x75, 0x28, 0x51, 0x36, 0x1e,
+ 0xc9, 0x08, 0xf9, 0x73, 0xea, 0xa7, 0xb1, 0xf1, 0x68, 0x36, 0x4a, 0x91, 0xbb, 0x45, 0x21, 0x96,
+ 0x02, 0xea, 0xbf, 0xb4, 0x4d, 0xaa, 0x16, 0x44, 0x80, 0xdb, 0x73, 0x01, 0x7a, 0xd2, 0x3e, 0x1b,
+ 0x23, 0xf6, 0xc3, 0x0d, 0x28, 0xd1, 0xd7, 0x21, 0x65, 0x81, 0xed, 0x32, 0x75, 0x49, 0x04, 0xb9,
+ 0xb5, 0xe0, 0x14, 0xa9, 0x63, 0xcd, 0x86, 0x98, 0xf8, 0xe1, 0xc7, 0xb0, 0xe4, 0x7a, 0xa1, 0xed,
+ 0xb2, 0x40, 0x2d, 0x6e, 0x2a, 0x5b, 0xe5, 0x9d, 0xab, 0x0b, 0x85, 0xd0, 0x91, 0x1c, 0x3d, 0x26,
+ 0xe3, 0x16, 0xa0, 0xc0, 0x1d, 0xfb, 0x26, 0x35, 0x4c, 0xd7, 0xa2, 0x86, 0xcd, 0x06, 0xae, 0x5a,
+ 0x12, 0x01, 0xae, 0xcf, 0x6f, 0x44, 0x10, 0x1b, 0xae, 0x45, 0x5b, 0x6c, 0xe0, 0xea, 0xd5, 0x60,
+ 0x6a, 0x8d, 0x2f, 0x41, 0x21, 0x38, 0x63, 0x21, 0x79, 0xad, 0x56, 0x84, 0x42, 0xa2, 0x55, 0xed,
+ 0xdf, 0x79, 0x58, 0xb9, 0x88, 0xc4, 0x3e, 0x85, 0xfc, 0x80, 0xef, 0x52, 0xcd, 0xfc, 0x37, 0x35,
+ 0x90, 0x3e, 0xd3, 0x45, 0x2c, 0x7c, 0xc7, 0x22, 0xd6, 0xa1, 0xcc, 0x68, 0x10, 0x52, 0x4b, 0x2a,
+ 0x22, 0x7b, 0x41, 0x4d, 0x81, 0x74, 0x9a, 0x97, 0x54, 0xee, 0x3b, 0x49, 0xea, 0x19, 0xac, 0x24,
+ 0x29, 0x19, 0x3e, 0x61, 0xc3, 0x58, 0x9b, 0xf7, 0xdf, 0x96, 0xc9, 0xb6, 0x16, 0xfb, 0xe9, 0xdc,
+ 0x4d, 0xaf, 0xd2, 0xa9, 0x35, 0x6e, 0x02, 0xb8, 0x8c, 0xba, 0x03, 0xc3, 0xa2, 0xa6, 0xa3, 0x16,
+ 0xcf, 0xa9, 0x52, 0x87, 0x53, 0xe6, 0xaa, 0xe4, 0x4a, 0xd4, 0x74, 0xf0, 0x27, 0x13, 0xa9, 0x2d,
+ 0x9d, 0xa3, 0x94, 0x43, 0xf9, 0x90, 0xcd, 0xa9, 0xad, 0x0f, 0x55, 0x9f, 0x72, 0xdd, 0x53, 0x2b,
+ 0xda, 0x59, 0x49, 0x24, 0xb1, 0xfd, 0xd6, 0x9d, 0xe9, 0x91, 0x9b, 0xdc, 0xd8, 0xb2, 0x9f, 0x5e,
+ 0xe2, 0x0f, 0x20, 0x01, 0x0c, 0x21, 0x2b, 0x10, 0xb7, 0x50, 0x25, 0x06, 0x8f, 0xc8, 0x88, 0x6e,
+ 0xec, 0x42, 0x75, 0xba, 0x3c, 0x78, 0x1d, 0xf2, 0x41, 0x48, 0xfc, 0x50, 0xa8, 0x30, 0xaf, 0xcb,
+ 0x05, 0x46, 0x90, 0xa5, 0xcc, 0x12, 0xb7, 0x5c, 0x5e, 0xe7, 0xff, 0x6e, 0x7c, 0x0c, 0xcb, 0x53,
+ 0x3f, 0x7f, 0x51, 0xc7, 0xda, 0x6f, 0x0a, 0xb0, 0xbe, 0x48, 0x73, 0x0b, 0xe5, 0x7f, 0x09, 0x0a,
+ 0x6c, 0x3c, 0x3a, 0xa1, 0xbe, 0x9a, 0x15, 0x11, 0xa2, 0x15, 0xae, 0x43, 0xde, 0x21, 0x27, 0xd4,
+ 0x51, 0x73, 0x9b, 0xca, 0x56, 0x75, 0xe7, 0xde, 0x85, 0x54, 0xbd, 0xdd, 0xe6, 0x2e, 0xba, 0xf4,
+ 0xc4, 0x9f, 0x41, 0x2e, 0xba, 0xe2, 0x78, 0x84, 0xbb, 0x17, 0x8b, 0xc0, 0xb5, 0xa8, 0x0b, 0x3f,
+ 0x7c, 0x05, 0x4a, 0xfc, 0xaf, 0xac, 0x6d, 0x41, 0xe4, 0x5c, 0xe4, 0x00, 0xaf, 0x2b, 0xde, 0x80,
+ 0xa2, 0x90, 0x99, 0x45, 0xe3, 0xd6, 0x90, 0xac, 0xf9, 0xc1, 0x58, 0x74, 0x40, 0xc6, 0x4e, 0x68,
+ 0xbc, 0x24, 0xce, 0x98, 0x0a, 0xc1, 0x94, 0xf4, 0x4a, 0x04, 0xfe, 0x9c, 0x63, 0xf8, 0x3a, 0x94,
+ 0xa5, 0x2a, 0x6d, 0x66, 0xd1, 0xd7, 0xe2, 0xf6, 0xc9, 0xeb, 0x52, 0xa8, 0x2d, 0x8e, 0xf0, 0x9f,
+ 0x7f, 0x1e, 0xb8, 0x2c, 0x3e, 0x5a, 0xf1, 0x13, 0x1c, 0x10, 0x3f, 0xff, 0xf1, 0xec, 0xc5, 0x77,
+ 0x6d, 0xf1, 0xf6, 0x66, 0xb5, 0x58, 0xfb, 0x53, 0x06, 0x72, 0xe2, 0x79, 0x5b, 0x81, 0xf2, 0xf1,
+ 0x17, 0x5d, 0xcd, 0x68, 0x76, 0xfa, 0xfb, 0x6d, 0x0d, 0x29, 0xb8, 0x0a, 0x20, 0x80, 0x27, 0xed,
+ 0x4e, 0xfd, 0x18, 0x65, 0x92, 0x75, 0xeb, 0xe8, 0xf8, 0xf1, 0x43, 0x94, 0x4d, 0x1c, 0xfa, 0x12,
+ 0xc8, 0xa5, 0x09, 0x0f, 0x76, 0x50, 0x1e, 0x23, 0xa8, 0xc8, 0x00, 0xad, 0x67, 0x5a, 0xf3, 0xf1,
+ 0x43, 0x54, 0x98, 0x46, 0x1e, 0xec, 0xa0, 0x25, 0xbc, 0x0c, 0x25, 0x81, 0xec, 0x77, 0x3a, 0x6d,
+ 0x54, 0x4c, 0x62, 0xf6, 0x8e, 0xf5, 0xd6, 0xd1, 0x01, 0x2a, 0x25, 0x31, 0x0f, 0xf4, 0x4e, 0xbf,
+ 0x8b, 0x20, 0x89, 0x70, 0xa8, 0xf5, 0x7a, 0xf5, 0x03, 0x0d, 0x95, 0x13, 0xc6, 0xfe, 0x17, 0xc7,
+ 0x5a, 0x0f, 0x55, 0xa6, 0xd2, 0x7a, 0xb0, 0x83, 0x96, 0x93, 0x9f, 0xd0, 0x8e, 0xfa, 0x87, 0xa8,
+ 0x8a, 0x57, 0x61, 0x59, 0xfe, 0x44, 0x9c, 0xc4, 0xca, 0x0c, 0xf4, 0xf8, 0x21, 0x42, 0x93, 0x44,
+ 0x64, 0x94, 0xd5, 0x29, 0xe0, 0xf1, 0x43, 0x84, 0x6b, 0x0d, 0xc8, 0x0b, 0x75, 0x61, 0x0c, 0xd5,
+ 0x76, 0x7d, 0x5f, 0x6b, 0x1b, 0x9d, 0xee, 0x71, 0xab, 0x73, 0x54, 0x6f, 0x23, 0x65, 0x82, 0xe9,
+ 0xda, 0xcf, 0xfa, 0x2d, 0x5d, 0x6b, 0xa2, 0x4c, 0x1a, 0xeb, 0x6a, 0xf5, 0x63, 0xad, 0x89, 0xb2,
+ 0xb5, 0xbb, 0xb0, 0xbe, 0xe8, 0x9e, 0x59, 0xf4, 0x64, 0xd4, 0xbe, 0x56, 0x60, 0x6d, 0xc1, 0x95,
+ 0xb9, 0xf0, 0x29, 0xfa, 0x29, 0xe4, 0xa5, 0xd2, 0x64, 0x13, 0xb9, 0xb3, 0xf0, 0xee, 0x15, 0xba,
+ 0x9b, 0x6b, 0x24, 0xc2, 0x2f, 0xdd, 0x48, 0xb3, 0xe7, 0x34, 0x52, 0x1e, 0x62, 0x4e, 0x4e, 0xbf,
+ 0x52, 0x40, 0x3d, 0x2f, 0xf6, 0x5b, 0x9e, 0xf7, 0xcc, 0xd4, 0xf3, 0xfe, 0xe9, 0x6c, 0x02, 0x37,
+ 0xce, 0xdf, 0xc3, 0x5c, 0x16, 0xdf, 0x28, 0x70, 0x69, 0xf1, 0xbc, 0xb1, 0x30, 0x87, 0xcf, 0xa0,
+ 0x30, 0xa2, 0xe1, 0xa9, 0x1b, 0xf7, 0xdc, 0x0f, 0x17, 0xdc, 0xe4, 0xdc, 0x3c, 0x5b, 0xab, 0xc8,
+ 0x2b, 0xdd, 0x0a, 0xb2, 0xe7, 0x0d, 0x0d, 0x32, 0x9b, 0xb9, 0x4c, 0x7f, 0x9d, 0x81, 0x77, 0x17,
+ 0x06, 0x5f, 0x98, 0xe8, 0x35, 0x00, 0x9b, 0x79, 0xe3, 0x50, 0xf6, 0x55, 0x79, 0xcd, 0x94, 0x04,
+ 0x22, 0x1e, 0x61, 0x7e, 0x85, 0x8c, 0xc3, 0xc4, 0x9e, 0x15, 0x76, 0x90, 0x90, 0x20, 0xec, 0x4e,
+ 0x12, 0xcd, 0x89, 0x44, 0xdf, 0x3f, 0x67, 0xa7, 0x73, 0x2d, 0xeb, 0x23, 0x40, 0xa6, 0x63, 0x53,
+ 0x16, 0x1a, 0x41, 0xe8, 0x53, 0x32, 0xb2, 0xd9, 0x50, 0xdc, 0xa3, 0xc5, 0xbd, 0xfc, 0x80, 0x38,
+ 0x01, 0xd5, 0x57, 0xa4, 0xb9, 0x17, 0x5b, 0xb9, 0x87, 0x68, 0x16, 0x7e, 0xca, 0xa3, 0x30, 0xe5,
+ 0x21, 0xcd, 0x89, 0x47, 0xed, 0x6f, 0x4b, 0x50, 0x4e, 0x4d, 0x67, 0xf8, 0x06, 0x54, 0x9e, 0x93,
+ 0x97, 0xc4, 0x88, 0x27, 0x6e, 0x59, 0x89, 0x32, 0xc7, 0xba, 0xd1, 0xd4, 0xfd, 0x11, 0xac, 0x0b,
+ 0x8a, 0x3b, 0x0e, 0xa9, 0x6f, 0x98, 0x0e, 0x09, 0x02, 0x51, 0xb4, 0xa2, 0xa0, 0x62, 0x6e, 0xeb,
+ 0x70, 0x53, 0x23, 0xb6, 0xe0, 0x47, 0xb0, 0x26, 0x3c, 0x46, 0x63, 0x27, 0xb4, 0x3d, 0x87, 0x1a,
+ 0xfc, 0x1d, 0x20, 0x10, 0xf7, 0x69, 0x92, 0xd9, 0x2a, 0x67, 0x1c, 0x46, 0x04, 0x9e, 0x51, 0x80,
+ 0x0f, 0xe0, 0x9a, 0x70, 0x1b, 0x52, 0x46, 0x7d, 0x12, 0x52, 0x83, 0xfe, 0x72, 0x4c, 0x9c, 0xc0,
+ 0x20, 0xcc, 0x32, 0x4e, 0x49, 0x70, 0xaa, 0xae, 0xa7, 0x03, 0x5c, 0xe6, 0xdc, 0x83, 0x88, 0xaa,
+ 0x09, 0x66, 0x9d, 0x59, 0x9f, 0x93, 0xe0, 0x14, 0xef, 0xc1, 0x25, 0x11, 0x28, 0x08, 0x7d, 0x9b,
+ 0x0d, 0x0d, 0xf3, 0x94, 0x9a, 0x2f, 0x8c, 0x71, 0x38, 0xd8, 0x55, 0xaf, 0xa4, 0x23, 0x88, 0x24,
+ 0x7b, 0x82, 0xd3, 0xe0, 0x94, 0x7e, 0x38, 0xd8, 0xc5, 0x3d, 0xa8, 0xf0, 0xf3, 0x18, 0xd9, 0x5f,
+ 0x51, 0x63, 0xe0, 0xfa, 0xa2, 0x47, 0x54, 0x17, 0x3c, 0xdc, 0xa9, 0x22, 0x6e, 0x77, 0x22, 0x87,
+ 0x43, 0xd7, 0xa2, 0x7b, 0xf9, 0x5e, 0x57, 0xd3, 0x9a, 0x7a, 0x39, 0x8e, 0xf2, 0xc4, 0xf5, 0xb9,
+ 0xa6, 0x86, 0x6e, 0x52, 0xe3, 0xb2, 0xd4, 0xd4, 0xd0, 0x8d, 0x2b, 0xfc, 0x08, 0xd6, 0x4c, 0x53,
+ 0x6e, 0xdb, 0x36, 0x8d, 0x68, 0x58, 0x0f, 0x54, 0x34, 0x55, 0x2f, 0xd3, 0x3c, 0x90, 0x84, 0x48,
+ 0xe6, 0x01, 0xfe, 0x04, 0xde, 0x9d, 0xd4, 0x2b, 0xed, 0xb8, 0x3a, 0xb7, 0xcb, 0x59, 0xd7, 0x47,
+ 0xb0, 0xe6, 0x9d, 0xcd, 0x3b, 0xe2, 0xa9, 0x5f, 0xf4, 0xce, 0x66, 0xdd, 0x6e, 0x89, 0x17, 0x30,
+ 0x9f, 0x9a, 0x24, 0xa4, 0x96, 0xfa, 0x5e, 0x9a, 0x9d, 0x32, 0xe0, 0xfb, 0x80, 0x4c, 0xd3, 0xa0,
+ 0x8c, 0x9c, 0x38, 0xd4, 0x20, 0x3e, 0x65, 0x24, 0x50, 0xaf, 0xa7, 0xc9, 0x55, 0xd3, 0xd4, 0x84,
+ 0xb5, 0x2e, 0x8c, 0xf8, 0x2e, 0xac, 0xba, 0x27, 0xcf, 0x4d, 0x29, 0x2e, 0xc3, 0xf3, 0xe9, 0xc0,
+ 0x7e, 0xad, 0xde, 0x14, 0x65, 0x5a, 0xe1, 0x06, 0x21, 0xad, 0xae, 0x80, 0xf1, 0x1d, 0x40, 0x66,
+ 0x70, 0x4a, 0x7c, 0x4f, 0x34, 0xe9, 0xc0, 0x23, 0x26, 0x55, 0x6f, 0x49, 0xaa, 0xc4, 0x8f, 0x62,
+ 0x18, 0x6b, 0x70, 0x9d, 0x6f, 0x9e, 0x11, 0xe6, 0x1a, 0xe3, 0x80, 0x1a, 0x93, 0x14, 0x93, 0xb3,
+ 0xf8, 0x90, 0xa7, 0xa5, 0x5f, 0x8d, 0x69, 0xfd, 0x80, 0x36, 0x13, 0x52, 0x7c, 0x3c, 0xcf, 0x60,
+ 0x7d, 0xcc, 0x6c, 0x16, 0x52, 0xdf, 0xf3, 0x29, 0x77, 0x96, 0x0f, 0xac, 0xfa, 0xcf, 0xa5, 0x73,
+ 0x86, 0xee, 0x7e, 0x9a, 0x2d, 0x45, 0xa2, 0xaf, 0x8d, 0xe7, 0xc1, 0xda, 0x1e, 0x54, 0xd2, 0xda,
+ 0xc1, 0x25, 0x90, 0xea, 0x41, 0x0a, 0xef, 0xa8, 0x8d, 0x4e, 0x93, 0xf7, 0xc2, 0x2f, 0x35, 0x94,
+ 0xe1, 0x3d, 0xb9, 0xdd, 0x3a, 0xd6, 0x0c, 0xbd, 0x7f, 0x74, 0xdc, 0x3a, 0xd4, 0x50, 0xf6, 0x6e,
+ 0xa9, 0xf8, 0xaf, 0x25, 0xf4, 0xe6, 0xcd, 0x9b, 0x37, 0x99, 0xda, 0x5f, 0x32, 0x50, 0x9d, 0x9e,
+ 0x83, 0xf1, 0x4f, 0xe0, 0xbd, 0xf8, 0xa5, 0x35, 0xa0, 0xa1, 0xf1, 0xca, 0xf6, 0x85, 0x9c, 0x47,
+ 0x44, 0x4e, 0x92, 0xc9, 0x49, 0xac, 0x47, 0xac, 0x1e, 0x0d, 0x7f, 0x61, 0xfb, 0x5c, 0xac, 0x23,
+ 0x12, 0xe2, 0x36, 0x5c, 0x67, 0xae, 0x11, 0x84, 0x84, 0x59, 0xc4, 0xb7, 0x8c, 0xc9, 0xe7, 0x02,
+ 0x83, 0x98, 0x26, 0x0d, 0x02, 0x57, 0x76, 0x92, 0x24, 0xca, 0x55, 0xe6, 0xf6, 0x22, 0xf2, 0xe4,
+ 0x8a, 0xad, 0x47, 0xd4, 0x19, 0xd5, 0x64, 0xcf, 0x53, 0xcd, 0x15, 0x28, 0x8d, 0x88, 0x67, 0x50,
+ 0x16, 0xfa, 0x67, 0x62, 0x7a, 0x2b, 0xea, 0xc5, 0x11, 0xf1, 0x34, 0xbe, 0xfe, 0xfe, 0xce, 0x20,
+ 0x5d, 0xc7, 0xbf, 0x67, 0xa1, 0x92, 0x9e, 0xe0, 0xf8, 0x40, 0x6c, 0x8a, 0x6b, 0x5e, 0x11, 0xb7,
+ 0xc0, 0x07, 0xdf, 0x3a, 0xef, 0x6d, 0x37, 0xf8, 0xfd, 0xbf, 0x57, 0x90, 0x73, 0x95, 0x2e, 0x3d,
+ 0x79, 0xef, 0xe5, 0x5a, 0xa3, 0x72, 0x5a, 0x2f, 0xea, 0xd1, 0x0a, 0x1f, 0x40, 0xe1, 0x79, 0x20,
+ 0x62, 0x17, 0x44, 0xec, 0x9b, 0xdf, 0x1e, 0xfb, 0x69, 0x4f, 0x04, 0x2f, 0x3d, 0xed, 0x19, 0x47,
+ 0x1d, 0xfd, 0xb0, 0xde, 0xd6, 0x23, 0x77, 0x7c, 0x19, 0x72, 0x0e, 0xf9, 0xea, 0x6c, 0xba, 0x53,
+ 0x08, 0xe8, 0xa2, 0x85, 0xbf, 0x0c, 0xb9, 0x57, 0x94, 0xbc, 0x98, 0xbe, 0x9f, 0x05, 0xf4, 0x3d,
+ 0x4a, 0xff, 0x3e, 0xe4, 0x45, 0xbd, 0x30, 0x40, 0x54, 0x31, 0xf4, 0x0e, 0x2e, 0x42, 0xae, 0xd1,
+ 0xd1, 0xb9, 0xfc, 0x11, 0x54, 0x24, 0x6a, 0x74, 0x5b, 0x5a, 0x43, 0x43, 0x99, 0xda, 0x23, 0x28,
+ 0xc8, 0x22, 0xf0, 0x47, 0x23, 0x29, 0x03, 0x7a, 0x27, 0x5a, 0x46, 0x31, 0x94, 0xd8, 0xda, 0x3f,
+ 0xdc, 0xd7, 0x74, 0x94, 0x49, 0x1f, 0xef, 0x9f, 0x15, 0x28, 0xa7, 0x06, 0x2a, 0xde, 0xca, 0x89,
+ 0xe3, 0xb8, 0xaf, 0x0c, 0xe2, 0xd8, 0x24, 0x88, 0xce, 0x07, 0x04, 0x54, 0xe7, 0xc8, 0x45, 0xeb,
+ 0xf7, 0x3f, 0xd1, 0xe6, 0x1f, 0x14, 0x40, 0xb3, 0xc3, 0xd8, 0x4c, 0x82, 0xca, 0x0f, 0x9a, 0xe0,
+ 0xef, 0x15, 0xa8, 0x4e, 0x4f, 0x60, 0x33, 0xe9, 0xdd, 0xf8, 0x41, 0xd3, 0xfb, 0x9d, 0x02, 0xcb,
+ 0x53, 0x73, 0xd7, 0xff, 0x55, 0x76, 0xbf, 0xcd, 0xc2, 0xda, 0x02, 0x3f, 0x5c, 0x8f, 0x06, 0x54,
+ 0x39, 0x33, 0xff, 0xf8, 0x22, 0xbf, 0xb5, 0xcd, 0xfb, 0x5f, 0x97, 0xf8, 0x61, 0x34, 0xcf, 0xde,
+ 0x01, 0x64, 0x5b, 0x94, 0x85, 0xf6, 0xc0, 0xa6, 0x7e, 0xf4, 0x6e, 0x2c, 0xa7, 0xd6, 0x95, 0x09,
+ 0x2e, 0x5f, 0x8f, 0x7f, 0x04, 0xd8, 0x73, 0x03, 0x3b, 0xb4, 0x5f, 0x52, 0xc3, 0x66, 0xf1, 0x8b,
+ 0x34, 0x9f, 0x62, 0x73, 0x3a, 0x8a, 0x2d, 0x2d, 0x16, 0x26, 0x6c, 0x46, 0x87, 0x64, 0x86, 0xcd,
+ 0xaf, 0xa1, 0xac, 0x8e, 0x62, 0x4b, 0xc2, 0xbe, 0x01, 0x15, 0xcb, 0x1d, 0xf3, 0x81, 0x40, 0xf2,
+ 0xf8, 0xad, 0xa7, 0xe8, 0x65, 0x89, 0x25, 0x94, 0x68, 0x62, 0x9b, 0xbc, 0xc1, 0x57, 0xf4, 0xb2,
+ 0xc4, 0x24, 0xe5, 0x36, 0xac, 0x90, 0xe1, 0xd0, 0xe7, 0xc1, 0xe3, 0x40, 0x72, 0x0c, 0xad, 0x26,
+ 0xb0, 0x20, 0x6e, 0x3c, 0x85, 0x62, 0x5c, 0x07, 0xde, 0x58, 0x78, 0x25, 0x0c, 0x4f, 0x7e, 0x47,
+ 0xc9, 0xf0, 0x97, 0x7a, 0x16, 0x1b, 0x6f, 0x40, 0xc5, 0x0e, 0x8c, 0xc9, 0x07, 0xbd, 0xcc, 0x66,
+ 0x66, 0xab, 0xa8, 0x97, 0xed, 0x20, 0xf9, 0x82, 0x53, 0xfb, 0x26, 0x03, 0xd5, 0xe9, 0x0f, 0x92,
+ 0xb8, 0x09, 0x45, 0xc7, 0x35, 0x89, 0x10, 0x82, 0xfc, 0x1a, 0xbe, 0xf5, 0x96, 0x6f, 0x98, 0xdb,
+ 0xed, 0x88, 0xaf, 0x27, 0x9e, 0x1b, 0x7f, 0x55, 0xa0, 0x18, 0xc3, 0xf8, 0x12, 0xe4, 0x3c, 0x12,
+ 0x9e, 0x8a, 0x70, 0xf9, 0xfd, 0x0c, 0x52, 0x74, 0xb1, 0xe6, 0x78, 0xe0, 0x11, 0x26, 0x24, 0x10,
+ 0xe1, 0x7c, 0xcd, 0xcf, 0xd5, 0xa1, 0xc4, 0x12, 0x03, 0xae, 0x3b, 0x1a, 0x51, 0x16, 0x06, 0xf1,
+ 0xb9, 0x46, 0x78, 0x23, 0x82, 0xf1, 0x3d, 0x58, 0x0d, 0x7d, 0x62, 0x3b, 0x53, 0xdc, 0x9c, 0xe0,
+ 0xa2, 0xd8, 0x90, 0x90, 0xf7, 0xe0, 0x72, 0x1c, 0xd7, 0xa2, 0x21, 0x31, 0x4f, 0xa9, 0x35, 0x71,
+ 0x2a, 0x88, 0xaf, 0x5d, 0xef, 0x45, 0x84, 0x66, 0x64, 0x8f, 0x7d, 0xf7, 0x9f, 0xc1, 0x9a, 0xe9,
+ 0x8e, 0x66, 0x2b, 0xb1, 0x8f, 0x66, 0xde, 0xbb, 0x82, 0xcf, 0x95, 0x2f, 0x61, 0x32, 0x54, 0x7c,
+ 0x9d, 0xc9, 0x1e, 0x74, 0xf7, 0xff, 0x98, 0xd9, 0x38, 0x90, 0x7e, 0xdd, 0xb8, 0x82, 0x3a, 0x1d,
+ 0x38, 0xd4, 0xe4, 0xd5, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3,
+ 0x18, 0x00, 0x00,
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
new file mode 100644
index 000000000..b175f555b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
@@ -0,0 +1,658 @@
+// Code generated by protoc-gen-gogo.
+// source: descriptor.proto
+// DO NOT EDIT!
+
+/*
+Package descriptor is a generated protocol buffer package.
+
+It is generated from these files:
+ descriptor.proto
+
+It has these top-level messages:
+ FileDescriptorSet
+ FileDescriptorProto
+ DescriptorProto
+ FieldDescriptorProto
+ OneofDescriptorProto
+ EnumDescriptorProto
+ EnumValueDescriptorProto
+ ServiceDescriptorProto
+ MethodDescriptorProto
+ FileOptions
+ MessageOptions
+ FieldOptions
+ EnumOptions
+ EnumValueOptions
+ ServiceOptions
+ MethodOptions
+ UninterpretedOption
+ SourceCodeInfo
+*/
+package descriptor
+
+import fmt "fmt"
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+import proto "github.com/gogo/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
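+
+// Editorial note (illustrative): the GoString methods below implement
+// fmt.GoStringer, so a populated descriptor can be rendered as Go source with
+// the %#v verb:
+//
+//	fmt.Printf("%#v\n", &FileDescriptorSet{})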
+
+func (this *FileDescriptorSet) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&descriptor.FileDescriptorSet{")
+ if this.File != nil {
+ s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FileDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 16)
+ s = append(s, "&descriptor.FileDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Package != nil {
+ s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
+ }
+ if this.Dependency != nil {
+ s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
+ }
+ if this.PublicDependency != nil {
+ s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
+ }
+ if this.WeakDependency != nil {
+ s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
+ }
+ if this.MessageType != nil {
+ s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
+ }
+ if this.EnumType != nil {
+ s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+ }
+ if this.Service != nil {
+ s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
+ }
+ if this.Extension != nil {
+ s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.SourceCodeInfo != nil {
+ s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
+ }
+ if this.Syntax != nil {
+ s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 14)
+ s = append(s, "&descriptor.DescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Field != nil {
+ s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
+ }
+ if this.Extension != nil {
+ s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+ }
+ if this.NestedType != nil {
+ s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
+ }
+ if this.EnumType != nil {
+ s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+ }
+ if this.ExtensionRange != nil {
+ s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
+ }
+ if this.OneofDecl != nil {
+ s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.ReservedRange != nil {
+ s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
+ }
+ if this.ReservedName != nil {
+ s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DescriptorProto_ExtensionRange) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
+ if this.Start != nil {
+ s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+ }
+ if this.End != nil {
+ s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *DescriptorProto_ReservedRange) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
+ if this.Start != nil {
+ s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+ }
+ if this.End != nil {
+ s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FieldDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 14)
+ s = append(s, "&descriptor.FieldDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Number != nil {
+ s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+ }
+ if this.Label != nil {
+ s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n")
+ }
+ if this.Type != nil {
+ s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n")
+ }
+ if this.TypeName != nil {
+ s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
+ }
+ if this.Extendee != nil {
+ s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
+ }
+ if this.DefaultValue != nil {
+ s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
+ }
+ if this.OneofIndex != nil {
+ s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
+ }
+ if this.JsonName != nil {
+ s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *OneofDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&descriptor.OneofDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.EnumDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Value != nil {
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumValueDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.EnumValueDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Number != nil {
+ s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ServiceDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.ServiceDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.Method != nil {
+ s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MethodDescriptorProto) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&descriptor.MethodDescriptorProto{")
+ if this.Name != nil {
+ s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+ }
+ if this.InputType != nil {
+ s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
+ }
+ if this.OutputType != nil {
+ s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
+ }
+ if this.Options != nil {
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+ }
+ if this.ClientStreaming != nil {
+ s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
+ }
+ if this.ServerStreaming != nil {
+ s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FileOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 20)
+ s = append(s, "&descriptor.FileOptions{")
+ if this.JavaPackage != nil {
+ s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
+ }
+ if this.JavaOuterClassname != nil {
+ s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
+ }
+ if this.JavaMultipleFiles != nil {
+ s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
+ }
+ if this.JavaGenerateEqualsAndHash != nil {
+ s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
+ }
+ if this.JavaStringCheckUtf8 != nil {
+ s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
+ }
+ if this.OptimizeFor != nil {
+ s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "descriptor.FileOptions_OptimizeMode")+",\n")
+ }
+ if this.GoPackage != nil {
+ s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
+ }
+ if this.CcGenericServices != nil {
+ s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
+ }
+ if this.JavaGenericServices != nil {
+ s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
+ }
+ if this.PyGenericServices != nil {
+ s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.CcEnableArenas != nil {
+ s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
+ }
+ if this.ObjcClassPrefix != nil {
+ s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
+ }
+ if this.CsharpNamespace != nil {
+ s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
+ }
+ if this.JavananoUseDeprecatedPackage != nil {
+ s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MessageOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&descriptor.MessageOptions{")
+ if this.MessageSetWireFormat != nil {
+ s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
+ }
+ if this.NoStandardDescriptorAccessor != nil {
+ s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.MapEntry != nil {
+ s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *FieldOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 11)
+ s = append(s, "&descriptor.FieldOptions{")
+ if this.Ctype != nil {
+ s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n")
+ }
+ if this.Packed != nil {
+ s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
+ }
+ if this.Jstype != nil {
+ s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n")
+ }
+ if this.Lazy != nil {
+ s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.Weak != nil {
+ s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&descriptor.EnumOptions{")
+ if this.AllowAlias != nil {
+ s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
+ }
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *EnumValueOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.EnumValueOptions{")
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ServiceOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.ServiceOptions{")
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MethodOptions) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.MethodOptions{")
+ if this.Deprecated != nil {
+ s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+ }
+ if this.UninterpretedOption != nil {
+ s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+ }
+ s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UninterpretedOption) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 11)
+ s = append(s, "&descriptor.UninterpretedOption{")
+ if this.Name != nil {
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ }
+ if this.IdentifierValue != nil {
+ s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
+ }
+ if this.PositiveIntValue != nil {
+ s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
+ }
+ if this.NegativeIntValue != nil {
+ s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
+ }
+ if this.DoubleValue != nil {
+ s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
+ }
+ if this.StringValue != nil {
+ s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
+ }
+ if this.AggregateValue != nil {
+ s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *UninterpretedOption_NamePart) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&descriptor.UninterpretedOption_NamePart{")
+ if this.NamePart != nil {
+ s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
+ }
+ if this.IsExtension != nil {
+ s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SourceCodeInfo) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&descriptor.SourceCodeInfo{")
+ if this.Location != nil {
+ s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SourceCodeInfo_Location) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&descriptor.SourceCodeInfo_Location{")
+ if this.Path != nil {
+ s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+ }
+ if this.Span != nil {
+ s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
+ }
+ if this.LeadingComments != nil {
+ s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
+ }
+ if this.TrailingComments != nil {
+ s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
+ }
+ if this.LeadingDetachedComments != nil {
+ s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
+ }
+ if this.XXX_unrecognized != nil {
+ s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringDescriptor(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
+ e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
+ if e == nil {
+ return "nil"
+ }
+ s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
+ keys := make([]int, 0, len(e))
+ for k := range e {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ ss := []string{}
+ for _, k := range keys {
+ ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+ }
+ s += strings.Join(ss, ",") + "})"
+ return s
+}
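A note on the generated GoString pattern above: each optional scalar field is a pointer, so valueToGoStringDescriptor prints an immediately-invoked function literal that turns a literal value back into a pointer, keeping the emitted text a valid Go expression. A minimal sketch of that round trip, with the reflection nil-check omitted and the field value purely illustrative:

    package main

    import "fmt"

    // valueToGoString mirrors the generated helper above: it renders an
    // expression that yields a *T from a literal T.
    func valueToGoString(v interface{}, typ string) string {
    	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, v)
    }

    func main() {
    	// For a *bool field set to true, the generated GoString fragment is:
    	fmt.Println("Deprecated: " + valueToGoString(true, "bool") + ",")
    	// Output: Deprecated: func(v bool) *bool { return &v } ( true ),
    }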
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
new file mode 100644
index 000000000..861f4d028
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
@@ -0,0 +1,357 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package descriptor
+
+import (
+ "strings"
+)
+
+func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
+ if !msg.GetOptions().GetMapEntry() {
+ return nil, nil
+ }
+ return msg.GetField()[0], msg.GetField()[1]
+}
+
+func dotToUnderscore(r rune) rune {
+ if r == '.' {
+ return '_'
+ }
+ return r
+}
+
+func (field *FieldDescriptorProto) WireType() (wire int) {
+ switch *field.Type {
+ case FieldDescriptorProto_TYPE_DOUBLE:
+ return 1
+ case FieldDescriptorProto_TYPE_FLOAT:
+ return 5
+ case FieldDescriptorProto_TYPE_INT64:
+ return 0
+ case FieldDescriptorProto_TYPE_UINT64:
+ return 0
+ case FieldDescriptorProto_TYPE_INT32:
+ return 0
+ case FieldDescriptorProto_TYPE_UINT32:
+ return 0
+ case FieldDescriptorProto_TYPE_FIXED64:
+ return 1
+ case FieldDescriptorProto_TYPE_FIXED32:
+ return 5
+ case FieldDescriptorProto_TYPE_BOOL:
+ return 0
+ case FieldDescriptorProto_TYPE_STRING:
+ return 2
+ case FieldDescriptorProto_TYPE_GROUP:
+ return 2
+ case FieldDescriptorProto_TYPE_MESSAGE:
+ return 2
+ case FieldDescriptorProto_TYPE_BYTES:
+ return 2
+ case FieldDescriptorProto_TYPE_ENUM:
+ return 0
+ case FieldDescriptorProto_TYPE_SFIXED32:
+ return 5
+ case FieldDescriptorProto_TYPE_SFIXED64:
+ return 1
+ case FieldDescriptorProto_TYPE_SINT32:
+ return 0
+ case FieldDescriptorProto_TYPE_SINT64:
+ return 0
+ }
+ panic("unreachable")
+}
+
+func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
+ packed := field.IsPacked()
+ wireType := field.WireType()
+ fieldNumber := field.GetNumber()
+ if packed {
+ wireType = 2
+ }
+ x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
+ return x
+}
+
+func (field *FieldDescriptorProto) GetKey() []byte {
+	x := field.GetKeyUint64()
+	keybuf := make([]byte, 0)
+	for x > 127 {
+		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
+		x >>= 7
+	}
+	keybuf = append(keybuf, uint8(x))
+	return keybuf
+}
+
+func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
+ msg := desc.GetMessage(packageName, messageName)
+ if msg == nil {
+ return nil
+ }
+ for _, field := range msg.GetField() {
+ if field.GetName() == fieldName {
+ return field
+ }
+ }
+ return nil
+}
+
+func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
+ for _, msg := range file.GetMessageType() {
+ if msg.GetName() == typeName {
+ return msg
+ }
+ nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
+ if nes != nil {
+ return nes
+ }
+ }
+ return nil
+}
+
+func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
+ for _, nes := range msg.GetNestedType() {
+ if nes.GetName() == typeName {
+ return nes
+ }
+ res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
+ if res != nil {
+ return res
+ }
+ }
+ return nil
+}
+
+func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
+ for _, file := range desc.GetFile() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+ continue
+ }
+ for _, msg := range file.GetMessageType() {
+ if msg.GetName() == typeName {
+ return msg
+ }
+ }
+ for _, msg := range file.GetMessageType() {
+ for _, nes := range msg.GetNestedType() {
+ if nes.GetName() == typeName {
+ return nes
+ }
+ if msg.GetName()+"."+nes.GetName() == typeName {
+ return nes
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
+ for _, file := range desc.GetFile() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+ continue
+ }
+ for _, msg := range file.GetMessageType() {
+ if msg.GetName() == typeName {
+ return file.GetSyntax() == "proto3"
+ }
+ }
+ for _, msg := range file.GetMessageType() {
+ for _, nes := range msg.GetNestedType() {
+ if nes.GetName() == typeName {
+ return file.GetSyntax() == "proto3"
+ }
+ if msg.GetName()+"."+nes.GetName() == typeName {
+ return file.GetSyntax() == "proto3"
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (msg *DescriptorProto) IsExtendable() bool {
+ return len(msg.GetExtensionRange()) > 0
+}
+
+func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
+ parent := desc.GetMessage(packageName, typeName)
+ if parent == nil {
+ return "", nil
+ }
+ if !parent.IsExtendable() {
+ return "", nil
+ }
+ extendee := "." + packageName + "." + typeName
+ for _, file := range desc.GetFile() {
+ for _, ext := range file.GetExtension() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+ if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+ continue
+ }
+ } else {
+ if ext.GetExtendee() != extendee {
+ continue
+ }
+ }
+ if ext.GetName() == fieldName {
+ return file.GetPackage(), ext
+ }
+ }
+ }
+ return "", nil
+}
+
+func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
+ parent := desc.GetMessage(packageName, typeName)
+ if parent == nil {
+ return "", nil
+ }
+ if !parent.IsExtendable() {
+ return "", nil
+ }
+ extendee := "." + packageName + "." + typeName
+ for _, file := range desc.GetFile() {
+ for _, ext := range file.GetExtension() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+ if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+ continue
+ }
+ } else {
+ if ext.GetExtendee() != extendee {
+ continue
+ }
+ }
+ if ext.GetNumber() == fieldNum {
+ return file.GetPackage(), ext
+ }
+ }
+ }
+ return "", nil
+}
+
+func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
+ parent := desc.GetMessage(packageName, typeName)
+ if parent == nil {
+ return "", ""
+ }
+ field := parent.GetFieldDescriptor(fieldName)
+ if field == nil {
+ var extPackageName string
+ extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
+ if field == nil {
+ return "", ""
+ }
+ packageName = extPackageName
+ }
+ typeNames := strings.Split(field.GetTypeName(), ".")
+ if len(typeNames) == 1 {
+ msg := desc.GetMessage(packageName, typeName)
+ if msg == nil {
+ return "", ""
+ }
+ return packageName, msg.GetName()
+ }
+ if len(typeNames) > 2 {
+ for i := 1; i < len(typeNames)-1; i++ {
+ packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
+ typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
+ msg := desc.GetMessage(packageName, typeName)
+ if msg != nil {
+ typeNames := strings.Split(msg.GetName(), ".")
+ if len(typeNames) == 1 {
+ return packageName, msg.GetName()
+ }
+ return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
+ }
+ }
+ }
+ return "", ""
+}
+
+func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
+ for _, field := range msg.GetField() {
+ if field.GetName() == fieldName {
+ return field
+ }
+ }
+ return nil
+}
+
+func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
+ for _, file := range desc.GetFile() {
+ if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+ continue
+ }
+ for _, enum := range file.GetEnumType() {
+ if enum.GetName() == typeName {
+ return enum
+ }
+ }
+ }
+ return nil
+}
+
+func (f *FieldDescriptorProto) IsEnum() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_ENUM
+}
+
+func (f *FieldDescriptorProto) IsMessage() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
+}
+
+func (f *FieldDescriptorProto) IsBytes() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_BYTES
+}
+
+func (f *FieldDescriptorProto) IsRepeated() bool {
+ return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
+}
+
+func (f *FieldDescriptorProto) IsString() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_STRING
+}
+
+func (f *FieldDescriptorProto) IsBool() bool {
+ return *f.Type == FieldDescriptorProto_TYPE_BOOL
+}
+
+func (f *FieldDescriptorProto) IsRequired() bool {
+ return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
+}
+
+func (f *FieldDescriptorProto) IsPacked() bool {
+ return f.Options != nil && f.GetOptions().GetPacked()
+}
+
+func (m *DescriptorProto) HasExtension() bool {
+ return len(m.ExtensionRange) > 0
+}
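GetKeyUint64 above packs a field's tag as (number<<3 | wire type), and GetKey then varint-encodes it: low seven bits per byte, continuation bit 0x80 on every byte but the last. A worked, self-contained sketch of the same encoding (field numbers chosen for illustration):

    package main

    import "fmt"

    // encodeKey replicates the tag construction from GetKeyUint64 and the
    // varint loop from GetKey.
    func encodeKey(fieldNumber int32, wireType int) []byte {
    	x := uint64(uint32(fieldNumber)<<3 | uint32(wireType))
    	var buf []byte
    	for x > 127 {
    		buf = append(buf, 0x80|uint8(x&0x7F)) // low 7 bits, continuation bit set
    		x >>= 7
    	}
    	return append(buf, uint8(x))
    }

    func main() {
    	fmt.Printf("% x\n", encodeKey(5, 2))  // 2a    -> one byte: 5<<3|2 = 42
    	fmt.Printf("% x\n", encodeKey(16, 0)) // 80 01 -> two bytes, since 16<<3 = 128
    }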
diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
new file mode 100644
index 000000000..ceadde6a5
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
@@ -0,0 +1,101 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+ "sort"
+)
+
+func Strings(l []string) {
+ sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+ sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+ sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+ sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+ sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+ sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+ sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+ sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return !p[i] && p[j] } // false sorts before true; strict ordering
+func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
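sortkeys gives generated code a one-call sort for every scalar key type a protobuf map can use; the slice types above simply adapt sort.Interface for the widths the standard library does not cover directly. A small usage sketch:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/sortkeys"
    )

    func main() {
    	keys := []int32{42, 7, 19}
    	sortkeys.Int32s(keys) // in-place ascending sort via sort.Sort(Int32Slice(keys))
    	fmt.Println(keys)     // [7 19 42]
    }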
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE
new file mode 100644
index 000000000..37ec93a14
--- /dev/null
+++ b/vendor/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README
new file mode 100644
index 000000000..387b4eb68
--- /dev/null
+++ b/vendor/github.com/golang/glog/README
@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation for the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
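Pulling the README fragments together, a minimal program against this vendored package would look like the sketch below; the program itself is illustrative, but Info, V, and Flush are the functions defined in glog.go later in this diff. flag.Parse must run before the first log call, and Flush should run before exit so buffered output reaches the log files:

    package main

    import (
    	"flag"

    	"github.com/golang/glog"
    )

    func main() {
    	flag.Parse()       // glog registers -logtostderr, -v, -vmodule, etc. in init()
    	defer glog.Flush() // log output is buffered; flush it before exiting

    	glog.Info("Prepare to repel boarders")
    	if glog.V(2) { // emitted only when run with -v=2 or higher
    		glog.Info("Starting transaction...")
    	}
    }

Running it with -logtostderr -v=2 prints both messages on standard error.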
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
new file mode 100644
index 000000000..54bd7afdc
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog.go
@@ -0,0 +1,1180 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+// glog.Info("Prepare to repel boarders")
+//
+// glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+// if glog.V(2) {
+// glog.Info("Starting transaction...")
+// }
+//
+// glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+// -logtostderr=false
+// Logs are written to standard error instead of to files.
+// -alsologtostderr=false
+// Logs are written to standard error as well as to files.
+// -stderrthreshold=ERROR
+// Log events at or above this severity are logged to standard
+// error as well as to files.
+// -log_dir=""
+// Log files will be written to this directory instead of the
+// default temporary directory.
+//
+// Other flags provide aids to debugging.
+//
+// -log_backtrace_at=""
+// When set to a file and line number holding a logging statement,
+// such as
+// -log_backtrace_at=gopherflakes.go:234
+// a stack trace will be written to the Info log whenever execution
+// hits that statement. (Unlike with -vmodule, the ".go" must be
+// present.)
+// -v=0
+// Enable V-leveled logging at the specified level.
+// -vmodule=""
+// The syntax of the argument is a comma-separated list of pattern=N,
+// where pattern is a literal file name (minus the ".go" suffix) or
+// "glob" pattern and N is a V level. For instance,
+// -vmodule=gopher*=3
+// sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package glog
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ stdLog "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+ infoLog severity = iota
+ warningLog
+ errorLog
+ fatalLog
+ numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+ return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+ atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+ return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+ return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+ var threshold severity
+ // Is it a known name?
+ if v, ok := severityByName(value); ok {
+ threshold = v
+ } else {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ threshold = severity(v)
+ }
+ logging.stderrThreshold.set(threshold)
+ return nil
+}
+
+func severityByName(s string) (severity, bool) {
+ s = strings.ToUpper(s)
+ for i, name := range severityName {
+ if name == s {
+ return severity(i), true
+ }
+ }
+ return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+ lines int64
+ bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+ return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+ return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+ Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+ infoLog: &Stats.Info,
+ warningLog: &Stats.Warning,
+ errorLog: &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type Level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+ return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+ atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+ return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(Level(v), logging.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.Atoi(patLev[1])
+ if err != nil {
+ return errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(logging.verbosity, filter, true)
+ return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+ file string
+ line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+ return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+ if t.line != line {
+ return false
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ file = file[i+1:]
+ }
+ return t.file == file
+}
+
+func (t *traceLocation) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+ if value == "" {
+ // Unset.
+ t.line = 0
+ t.file = ""
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+func init() {
+ flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+ flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+ flag.Var(&logging.verbosity, "v", "log level for V logs")
+ flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ // Default stderrThreshold is ERROR.
+ logging.stderrThreshold = errorLog
+
+ logging.setVState(0, nil, false)
+ go logging.flushDaemon()
+}
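Because init registers these flags on the default flag set, any binary that calls flag.Parse accepts them directly on its command line, for example (binary name hypothetical):

    $ ./server -logtostderr -v=1 -vmodule='gopher*=3' -log_backtrace_at=gopherflakes.go:234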
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severity // The -stderrthreshold flag.
+
+ // freeList is a list of byte buffers, maintained under freeListMu.
+ freeList *buffer
+ // freeListMu maintains the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ freeListMu sync.Mutex
+
+ // mu protects the remaining elements of this structure and is
+ // used to synchronize logging.
+ mu sync.Mutex
+ // file holds writer for each of the log types.
+ file [numSeverity]flushSyncWriter
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+	// using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+	verbosity Level // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+ bytes.Buffer
+ tmp [64]byte // temporary byte array for creating headers.
+ next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ logging.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&logging.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ logging.vmodule.filter = filter
+ logging.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+ logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+ l.freeListMu.Lock()
+ b := l.freeList
+ if b != nil {
+ l.freeList = b.next
+ }
+ l.freeListMu.Unlock()
+ if b == nil {
+ b = new(buffer)
+ } else {
+ b.next = nil
+ b.Reset()
+ }
+ return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death.
+ return
+ }
+ l.freeListMu.Lock()
+ b.next = l.freeList
+ l.freeList = b
+ l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames up the call stack the source line to be identified in the log message lies.
+
+Log lines have this form:
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+ L A single character, representing the log level (eg 'I' for INFO)
+ mm The month (zero padded; ie May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+ now := timeNow()
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > fatalLog {
+ s = infoLog // for safety.
+ }
+ buf := l.getBuffer()
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.tmp[0] = severityChar[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.tmp[21] = ' '
+ buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+ buf.tmp[29] = ' '
+ buf.Write(buf.tmp[:30])
+ buf.WriteString(file)
+ buf.tmp[0] = ':'
+ n := buf.someDigits(1, line)
+ buf.tmp[n+1] = ']'
+ buf.tmp[n+2] = ' '
+ buf.Write(buf.tmp[:n+3])
+ return buf
+}
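Concretely, an INFO message logged on November 1 at 11:24:59.000123 by process 1234 from main.go line 42 gets this header (values illustrative; the pid is space-padded to seven columns):

    I1101 11:24:59.000123    1234 main.go:42] Prepare to repel boarders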
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+ buf.tmp[i+1] = digits[d%10]
+ d /= 10
+ buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+ j := n - 1
+ for ; j >= 0 && d > 0; j-- {
+ buf.tmp[i+j] = digits[d%10]
+ d /= 10
+ }
+ for ; j >= 0; j-- {
+ buf.tmp[i+j] = pad
+ }
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+ // Print into the top, then copy down. We know there's space for at least
+ // a 10-digit number.
+ j := len(buf.tmp)
+ for {
+ j--
+ buf.tmp[j] = digits[d%10]
+ d /= 10
+ if d == 0 {
+ break
+ }
+ }
+ return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintln(buf, args...)
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+ l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintf(buf, format, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number. If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless -logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+ buf := l.formatHeader(s, file, line)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, alsoToStderr)
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+ l.mu.Lock()
+ if l.traceLocation.isSet() {
+ if l.traceLocation.match(file, line) {
+ buf.Write(stacks(false))
+ }
+ }
+ data := buf.Bytes()
+ if !flag.Parsed() {
+ os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
+ os.Stderr.Write(data)
+ } else if l.toStderr {
+ os.Stderr.Write(data)
+ } else {
+ if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+ os.Stderr.Write(data)
+ }
+ if l.file[s] == nil {
+ if err := l.createFiles(s); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+ switch s {
+ case fatalLog:
+ l.file[fatalLog].Write(data)
+ fallthrough
+ case errorLog:
+ l.file[errorLog].Write(data)
+ fallthrough
+ case warningLog:
+ l.file[warningLog].Write(data)
+ fallthrough
+ case infoLog:
+ l.file[infoLog].Write(data)
+ }
+ }
+ if s == fatalLog {
+ // If we got here via Exit rather than Fatal, print no stacks.
+ if atomic.LoadUint32(&fatalNoStacks) > 0 {
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(1)
+ }
+ // Dump all goroutine stacks before exiting.
+ // First, make sure we see the trace for the current goroutine on standard error.
+ // If -logtostderr has been specified, the loop below will do that anyway
+ // as the first stack in the full dump.
+ if !l.toStderr {
+ os.Stderr.Write(stacks(false))
+ }
+ // Write the stack trace for all goroutines to the files.
+ trace := stacks(true)
+ logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+ for log := fatalLog; log >= infoLog; log-- {
+ if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+ f.Write(trace)
+ }
+ }
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+ }
+ l.putBuffer(buf)
+ l.mu.Unlock()
+ if stats := severityStats[s]; stats != nil {
+ atomic.AddInt64(&stats.lines, 1)
+ atomic.AddInt64(&stats.bytes, int64(len(data)))
+ }
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+ done := make(chan bool, 1)
+ go func() {
+ Flush() // calls logging.lockAndFlushAll()
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+ }
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+ // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+ n := 10000
+ if all {
+ n = 100000
+ }
+ var trace []byte
+ for i := 0; i < 5; i++ {
+ trace = make([]byte, n)
+ nbytes := runtime.Stack(trace, all)
+ if nbytes < len(trace) {
+ return trace[:nbytes]
+ }
+ n *= 2
+ }
+ return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. It is used in testing and to guarantee we reach a required
+// exit for fatal logs. Alternatively, exit could be a function rather than a
+// method, but that would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+ fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+ // If logExitFunc is set, we do that instead of exiting.
+ if logExitFunc != nil {
+ logExitFunc(err)
+ return
+ }
+ l.flushAll()
+ os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+ logger *loggingT
+ *bufio.Writer
+ file *os.File
+ sev severity
+ nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+ return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+ if sb.nbytes+uint64(len(p)) >= MaxSize {
+ if err := sb.rotateFile(time.Now()); err != nil {
+ sb.logger.exit(err)
+ }
+ }
+ n, err = sb.Writer.Write(p)
+ sb.nbytes += uint64(n)
+ if err != nil {
+ sb.logger.exit(err)
+ }
+ return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+ if sb.file != nil {
+ sb.Flush()
+ sb.file.Close()
+ }
+ var err error
+ sb.file, _, err = create(severityName[sb.sev], now)
+ sb.nbytes = 0
+ if err != nil {
+ return err
+ }
+
+ sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+ // Write header.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+ fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+ fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+ n, err := sb.file.Write(buf.Bytes())
+ sb.nbytes += uint64(n)
+ return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+ now := time.Now()
+ // Files are created in decreasing severity order, so as soon as we find one
+ // has already been created, we can stop.
+ for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+ sb := &syncBuffer{
+ logger: l,
+ sev: s,
+ }
+ if err := sb.rotateFile(now); err != nil {
+ return err
+ }
+ l.file[s] = sb
+ }
+ return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+	for range time.NewTicker(flushInterval).C {
+ l.lockAndFlushAll()
+ }
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+ l.mu.Lock()
+ l.flushAll()
+ l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+ // Flush from fatal down, in case there's trouble flushing.
+ for s := fatalLog; s >= infoLog; s-- {
+ file := l.file[s]
+ if file != nil {
+ file.Flush() // ignore error
+ file.Sync() // ignore error
+ }
+ }
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities. Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
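+
+// Example (editor's sketch, not part of the original source): after the call
+// below, messages written through the standard "log" package are re-routed
+// through logBridge at INFO severity and, because logBridge passes
+// alsoToStderr=true, always appear on standard error as well:
+//
+//	glog.CopyStandardLogTo("INFO")
+//	log.Print("starting up") // lands in the INFO log and on stderr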
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity(lb), file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+// if glog.V(2) { glog.Info("log this") }
+// or
+// glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and -vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return Verbose(true)
+ }
+
+	// It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2, logging.pcs[:]) == 0 {
+ return Verbose(false)
+ }
+ v, ok := logging.vmap[logging.pcs[0]]
+ if !ok {
+ v = logging.setV(logging.pcs[0])
+ }
+ return Verbose(v >= level)
+ }
+ return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v {
+ logging.print(infoLog, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v {
+ logging.println(infoLog, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v {
+ logging.printf(infoLog, format, args...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+ logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+ logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+ logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+ logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+ logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+ logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+ logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+ logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printf(fatalLog, format, args...)
+}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
new file mode 100644
index 000000000..65075d281
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file.go
@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+ if *logDir != "" {
+ logDirs = append(logDirs, *logDir)
+ }
+ logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+ pid = os.Getpid()
+ program = filepath.Base(os.Args[0])
+ host = "unknownhost"
+ userName = "unknownuser"
+)
+
+func init() {
+ h, err := os.Hostname()
+ if err == nil {
+ host = shortHostname(h)
+ }
+
+ current, err := user.Current()
+ if err == nil {
+ userName = current.Username
+ }
+
+ // Sanitize userName since it may contain filepath separators on Windows.
+ userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+ if i := strings.Index(hostname, "."); i >= 0 {
+ return hostname[:i]
+ }
+ return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+ name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+ program,
+ host,
+ userName,
+ tag,
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ t.Second(),
+ pid)
+ return name, program + "." + tag
+}
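+
+// For instance (editor's note, not part of the original file): for program
+// "prog" on host "www" run by user "bob" with pid 1234, logName("INFO", t)
+// yields a name like
+//
+//	prog.www.bob.log.INFO.20171101-112459.1234
+//
+// and the symlink name "prog.INFO".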
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+ onceLogDirs.Do(createLogDirs)
+ if len(logDirs) == 0 {
+ return nil, "", errors.New("log: no log dirs")
+ }
+ name, link := logName(tag, t)
+ var lastErr error
+ for _, dir := range logDirs {
+ fname := filepath.Join(dir, name)
+ f, err := os.Create(fname)
+ if err == nil {
+ symlink := filepath.Join(dir, link)
+ os.Remove(symlink) // ignore err
+ os.Symlink(name, symlink) // ignore err
+ return f, fname, nil
+ }
+ lastErr = err
+ }
+ return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE
new file mode 100644
index 000000000..37ec93a14
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/groupcache/README.md b/vendor/github.com/golang/groupcache/README.md
new file mode 100644
index 000000000..70c29da16
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/README.md
@@ -0,0 +1,73 @@
+# groupcache
+
+## Summary
+
+groupcache is a caching and cache-filling library, intended as a
+replacement for memcached in many cases.
+
+For API docs and examples, see http://godoc.org/github.com/golang/groupcache
+
+## Comparison to memcached
+
+### **Like memcached**, groupcache:
+
+ * shards by key to select which peer is responsible for that key
+
+### **Unlike memcached**, groupcache:
+
+ * does not require running a separate set of servers, thus massively
+ reducing deployment/configuration pain. groupcache is a client
+ library as well as a server. It connects to its own peers.
+
+ * comes with a cache filling mechanism. Whereas memcached just says
+ "Sorry, cache miss", often resulting in a thundering herd of
+ database (or whatever) loads from an unbounded number of clients
+ (which has resulted in several fun outages), groupcache coordinates
+ cache fills such that only one load in one process of an entire
+ replicated set of processes populates the cache, then multiplexes
+ the loaded value to all callers.
+
+ * does not support versioned values. If key "foo" is value "bar",
+ key "foo" must always be "bar". There are neither cache expiration
+ times, nor explicit cache evictions. Thus there is also no CAS,
+ nor Increment/Decrement. This also means that groupcache....
+
+ * ... supports automatic mirroring of super-hot items to multiple
+ processes. This prevents memcached hot spotting where a machine's
+ CPU and/or NIC are overloaded by very popular keys/values.
+
+ * is currently only available for Go. It's very unlikely that I
+ (bradfitz@) will port the code to any other language.
+
+## Loading process
+
+In a nutshell, a groupcache lookup of **Get("foo")** looks like (a code sketch follows the list):
+
+(On machine #5 of a set of N machines running the same code)
+
+ 1. Is the value of "foo" in local memory because it's super hot? If so, use it.
+
+ 2. Is the value of "foo" in local memory because peer #5 (the current
+ peer) is the owner of it? If so, use it.
+
+ 3. Amongst all the peers in my set of N, am I the owner of the key
+ "foo"? (e.g. does it consistent hash to 5?) If so, load it. If
+ other callers come in, via the same process or via RPC requests
+ from peers, they block waiting for the load to finish and get the
+ same answer. If not, RPC to the peer that's the owner and get
+ the answer. If the RPC fails, just load it locally (still with
+ local dup suppression).
+
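+Concretely, the consumer side of that flow looks roughly like the sketch
+below (an editor's illustration against the API documented at the godoc
+link above; `loadFromDB` stands in for whatever loads a value on a miss):
+
+```go
+var thumbs = groupcache.NewGroup("thumbnails", 64<<20, groupcache.GetterFunc(
+	func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
+		// Runs on a cache miss; groupcache arranges that only one load
+		// per key is in flight across the replicated set.
+		return dest.SetBytes(loadFromDB(key))
+	}))
+
+func thumbnail(key string) ([]byte, error) {
+	var data []byte
+	err := thumbs.Get(nil, key, groupcache.AllocatingByteSliceSink(&data))
+	return data, err
+}
+```
+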
+## Users
+
+groupcache is in production use by dl.google.com (its original user),
+parts of Blogger, parts of Google Code, parts of Google Fiber, parts
+of Google production monitoring systems, etc.
+
+## Presentations
+
+See http://talks.golang.org/2013/oscon-dl.slide
+
+## Help
+
+Use the golang-nuts mailing list for any discussion or questions.
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go
new file mode 100644
index 000000000..532cc45e6
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/lru/lru.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package lru implements an LRU cache.
+package lru
+
+import "container/list"
+
+// Cache is an LRU cache. It is not safe for concurrent access.
+type Cache struct {
+ // MaxEntries is the maximum number of cache entries before
+ // an item is evicted. Zero means no limit.
+ MaxEntries int
+
+	// OnEvicted optionally specifies a callback function to be
+ // executed when an entry is purged from the cache.
+ OnEvicted func(key Key, value interface{})
+
+ ll *list.List
+ cache map[interface{}]*list.Element
+}
+
+// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
+type Key interface{}
+
+type entry struct {
+ key Key
+ value interface{}
+}
+
+// New creates a new Cache.
+// If maxEntries is zero, the cache has no limit and it's assumed
+// that eviction is done by the caller.
+func New(maxEntries int) *Cache {
+ return &Cache{
+ MaxEntries: maxEntries,
+ ll: list.New(),
+ cache: make(map[interface{}]*list.Element),
+ }
+}
+
+// Add adds a value to the cache.
+func (c *Cache) Add(key Key, value interface{}) {
+ if c.cache == nil {
+ c.cache = make(map[interface{}]*list.Element)
+ c.ll = list.New()
+ }
+ if ee, ok := c.cache[key]; ok {
+ c.ll.MoveToFront(ee)
+ ee.Value.(*entry).value = value
+ return
+ }
+ ele := c.ll.PushFront(&entry{key, value})
+ c.cache[key] = ele
+ if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
+ c.RemoveOldest()
+ }
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key Key) (value interface{}, ok bool) {
+ if c.cache == nil {
+ return
+ }
+ if ele, hit := c.cache[key]; hit {
+ c.ll.MoveToFront(ele)
+ return ele.Value.(*entry).value, true
+ }
+ return
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key Key) {
+ if c.cache == nil {
+ return
+ }
+ if ele, hit := c.cache[key]; hit {
+ c.removeElement(ele)
+ }
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ if c.cache == nil {
+ return
+ }
+ ele := c.ll.Back()
+ if ele != nil {
+ c.removeElement(ele)
+ }
+}
+
+func (c *Cache) removeElement(e *list.Element) {
+ c.ll.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.cache, kv.key)
+ if c.OnEvicted != nil {
+ c.OnEvicted(kv.key, kv.value)
+ }
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ if c.cache == nil {
+ return 0
+ }
+ return c.ll.Len()
+}
+
+// Clear purges all stored items from the cache.
+func (c *Cache) Clear() {
+ if c.OnEvicted != nil {
+ for _, e := range c.cache {
+ kv := e.Value.(*entry)
+ c.OnEvicted(kv.key, kv.value)
+ }
+ }
+ c.ll = nil
+ c.cache = nil
+}
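+
+// Example (editor's sketch, not part of the original file):
+//
+//	c := lru.New(2)
+//	c.Add("a", 1)
+//	c.Add("b", 2)
+//	c.Add("c", 3)       // evicts "a", the least recently used entry
+//	_, ok := c.Get("a") // ok == false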
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 000000000..1b1b1921e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
new file mode 100644
index 000000000..795f53f6f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -0,0 +1,242 @@
+# Go support for Protocol Buffers
+
+[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
+
+Google's data interchange format.
+Copyright 2010 The Go Authors.
+https://github.com/golang/protobuf
+
+This package and the code it generates require at least Go 1.4.
+
+This software implements Go bindings for protocol buffers. For
+information about protocol buffers themselves, see
+ https://developers.google.com/protocol-buffers/
+
+## Installation ##
+
+To use this software, you must:
+- Install the standard C++ implementation of protocol buffers from
+ https://developers.google.com/protocol-buffers/
+- Of course, install the Go compiler and tools from
+ https://golang.org/
+ See
+ https://golang.org/doc/install
+ for details or, if you are using gccgo, follow the instructions at
+ https://golang.org/doc/install/gccgo
+- Grab the code from the repository and install the proto package.
+ The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
+ The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
+ defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
+ compiler, protoc, to find it.
+
+This software has two parts: a 'protocol compiler plugin' that
+generates Go source files that, once compiled, can access and manage
+protocol buffers; and a library that implements run-time support for
+encoding (marshaling), decoding (unmarshaling), and accessing protocol
+buffers.
+
+There is support for gRPC in Go using protocol buffers.
+See the note at the bottom of this file for details.
+
+There are no insertion points in the plugin.
+
+
+## Using protocol buffers with Go ##
+
+Once the software is installed, there are two steps to using it.
+First you must compile the protocol buffer definitions and then import
+them, with the support library, into your program.
+
+To compile the protocol buffer definition, run protoc with the --go_out
+parameter set to the directory you want to output the Go code to.
+
+ protoc --go_out=. *.proto
+
+The generated files will be suffixed .pb.go. See the Test code below
+for an example using such a file.
+
+
+The package comment for the proto library contains text describing
+the interface provided in Go for protocol buffers. Here is an edited
+version.
+
+==========
+
+The proto package converts data structures to and from the
+wire format of protocol buffers. It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+   a String method, and an Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+Consider file test.proto, containing
+
+```proto
+ package example;
+
+ enum FOO { X = 17; };
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ }
+```
+
+To create and play with a Test object from the example package,
+
+```go
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ "path/to/example"
+ )
+
+ func main() {
+	test := &example.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+		Optionalgroup: &example.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &example.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+```
+
+## Parameters ##
+
+To pass extra parameters to the plugin, use a comma-separated
+parameter list separated from the output directory by a colon:
+
+
+ protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
+
+
+- `import_prefix=xxx` - a prefix that is prepended to all imports. Useful
+  for things like generating protos in a
+ subdirectory, or regenerating vendored protobufs in-place.
+- `import_path=foo/bar` - used as the package if no input files
+ declare `go_package`. If it contains slashes, everything up to the
+ rightmost slash is ignored.
+- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
+ load. The only plugin in this repo is `grpc`.
+- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
+ associated with Go package quux/shme. This is subject to the
+ import_prefix parameter.
+
+## gRPC Support ##
+
+If a proto file specifies RPC services, protoc-gen-go can be instructed to
+generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
+the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
+the --go_out argument to protoc:
+
+ protoc --go_out=plugins=grpc:. *.proto
+
+## Compatibility ##
+
+The library and the generated code are expected to be stable over time.
+However, we reserve the right to make breaking changes without notice for the
+following reasons:
+
+- Security. A security issue in the specification or implementation may come to
+ light whose resolution requires breaking compatibility. We reserve the right
+ to address such security issues.
+- Unspecified behavior. There are some aspects of the Protocol Buffers
+ specification that are undefined. Programs that depend on such unspecified
+ behavior may break in future releases.
+- Specification errors or changes. If it becomes necessary to address an
+ inconsistency, incompleteness, or change in the Protocol Buffers
+ specification, resolving the issue could affect the meaning or legality of
+ existing programs. We reserve the right to address such issues, including
+ updating the implementations.
+- Bugs. If the library has a bug that violates the specification, a program
+ that depends on the buggy behavior may break if the bug is fixed. We reserve
+ the right to fix such bugs.
+- Adding methods or fields to generated structs. These may conflict with field
+ names that already exist in a schema, causing applications to break. When the
+ code generator encounters a field in the schema that would collide with a
+ generated field or method name, the code generator will append an underscore
+ to the generated field or method name.
+- Adding, removing, or changing methods or fields in generated structs that
+ start with `XXX`. These parts of the generated code are exported out of
+ necessity, but should not be considered part of the public API.
+- Adding, removing, or changing unexported symbols in generated code.
+
+Any breaking changes outside of these will be announced 6 months in advance to
+protobuf@googlegroups.com.
+
+You should, whenever possible, use generated code created by the `protoc-gen-go`
+tool built at the same commit as the `proto` package. The `proto` package
+declares package-level constants in the form `ProtoPackageIsVersionX`.
+Application code and generated code may depend on one of these constants to
+ensure that compilation will fail if the available version of the proto library
+is too old. Whenever we make a change to the generated code that requires newer
+library support, in the same commit we will increment the version number of the
+generated code and declare a new package-level constant whose name incorporates
+the latest version number. Removing a compatibility constant is considered a
+breaking change and would be subject to the announcement policy stated above.
+
+The `protoc-gen-go/generator` package exposes a plugin interface,
+which is used by the gRPC code generation. This interface is not
+supported and is subject to incompatible changes without notice.
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 000000000..e392575b3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
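+
+// Example (editor's sketch; example.Test is the message from the README):
+//
+//	dst := &example.Test{Label: proto.String("a")}
+//	proto.Merge(dst, &example.Test{Reps: []int64{1, 2}})
+//	// dst keeps Label "a"; its Reps field is now [1 2].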
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 000000000..aa207298f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,970 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// (0, 0) if the buffer is too short or the value overflows 64 bits.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
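+
+// As a worked example, 300 (binary 100101100) encodes as the two bytes
+// 0xAC 0x02: the low seven bits (0101100 = 0x2C) come first with the
+// continuation bit set (0xAC), then the remaining bits (10 = 0x02). Thus:
+//
+//	x, n := DecodeVarint([]byte{0xAC, 0x02})
+//	// x == 300, n == 2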
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
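+
+// The unrolled loop above uses an add-then-correct scheme: each step adds
+// b<<shift in full and, only once the continuation bit is known to be set,
+// subtracts 0x80<<shift before the next step. That is equivalent to
+// accumulating (b&0x7F)<<shift, but keeps the hot path to one add, one
+// shift, and one branch per byte.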
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
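+
+// Zigzag encoding maps signed integers to unsigned ones so that values of
+// small magnitude stay small as varints: 0, -1, 1, -2, 2, ... encode to
+// 0, 1, 2, 3, 4, ... The decoders above invert that mapping; for example,
+// DecodeZigzag64 maps 1 back to the 64-bit pattern of -1.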
+
+// These are not ValueDecoders: they produce an array of bytes or a string,
+// and are used for the bytes protocol buffer type and for embedded messages.
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+		// TODO: check whether more call sites can use alloc=false.
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// skipAndSave skips the next item in the buffer; its wire type has already been
+// decoded by the caller and is passed in as an argument. If the message's
+// XXX_unrecognized field exists, the skipped bytes, re-prefixed with their key,
+// are appended there so that unknown fields survive a later re-marshal.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// skip skips the next item in the buffer; its wire type has already been decoded
+// by the caller and is passed in as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
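+
+// For reference, the wire types in the low three bits of each field key are:
+// 0 varint, 1 fixed64, 2 length-delimited, 3 start group, 4 end group, and
+// 5 fixed32. The switch above handles every type that can begin a field;
+// an end-group marker is only meaningful inside a WireStartGroup sequence.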
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
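+
+// Illustrative client usage; pb.Example stands in for any generated message
+// type (the name is hypothetical):
+//
+//	msg := new(pb.Example)
+//	if err := proto.Unmarshal(data, msg); err != nil {
+//		// handle malformed input
+//	}
+//	// Accumulate fields from a second buffer without resetting msg:
+//	if err := proto.UnmarshalMerge(moreData, msg); err != nil {
+//		// handle malformed input
+//	}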
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
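+
+// Packed repeated fields, handled by the *_packed_* decoders here, carry a
+// single field key with wire type 2, a byte count, and then the element
+// encodings concatenated with no per-element keys. For example, the packed
+// payload for the bools true, false, true is the three bytes 0x01 0x00 0x01,
+// preceded by the length byte 0x03.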
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+		// The tagcodes for the key and value properties are always a single
+		// byte, because the key and value fields have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
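+
+// Under the restricted format parsed above, an entry of a
+// map[int32]string{7: "hi"} arrives as a length-delimited blob holding
+// key field 1 (the varint 7) followed by value field 2 (the string "hi").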
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 000000000..8b84d1b22
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1362 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
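+
+// For example, EncodeVarint(1) returns []byte{0x01}, while EncodeVarint(128)
+// returns []byte{0x80, 0x01}: the low seven bits (all zero) are emitted first
+// with the continuation bit set, then the eighth bit as 0x01.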
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// Use a signed number to get an arithmetic right shift.
+ return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// Use a signed number to get an arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
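+
+// Illustrative client usage; pb.Example stands in for any generated message
+// type (the name is hypothetical):
+//
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		// Even on a *RequiredNotSetError, data holds the encoding of
+//		// the fields that were set.
+//	}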
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ p.buf = append(p.buf, data...)
+ return err
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Encode++ // Parens are to work around a goimports bug.
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Size++ // Parens are to work around a goimports bug.
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable in principle, but only interface,
+// map, pointer, and slice values can actually hold a Go nil.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+
+ v, mu := exts.extensionsRead()
+ if v == nil {
+ return nil
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+ if err := encodeExtensionsMap(v); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns a new reflect.Value matching the map's value type,
+// and a structPointer suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+	keyptr.Set(keycopy.Addr())
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+		valptr.Set(valcopy.Addr())
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable varint-encoded length
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
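+
+// Worked example of the fix-up above: four bytes are reserved, so
+// iMsg-iLen == 4. If the encoded message turns out to be 10 bytes long,
+// its length varint needs lLen == 1 byte, so x == 1-4 == -3: the message
+// is copied three bytes left and the buffer shrunk before the varint 10
+// is written into the reserved space.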
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 000000000..2ed1cf596
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
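+
+// Illustrative usage (a sketch; pb.Test is the hypothetical generated type
+// used in the proto package documentation):
+//
+//	m1 := &pb.Test{Label: proto.String("hello")}
+//	m2 := &pb.Test{Label: proto.String("hello")}
+//	fmt.Println(proto.Equal(m1, m2)) // true
+//	m2.Label = proto.String("bye")
+//	fmt.Println(proto.Equal(m1, m2)) // false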
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 000000000..eaad21831
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,587 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+ if ep, ok := p.(extendableProto); ok {
+ return ep, ok
+ }
+ if ep, ok := p.(extendableProtoV1); ok {
+ return extensionAdapter{ep}, ok
+ }
+ return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensionsWrite' and 'extensionsRead' methods,
+// which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, ok := extendable(base)
+ if !ok {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensions encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return nil // fast path
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return encodeExtensionsMap(m)
+}
+
+// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionsMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, ok := extendable(pb)
+ if !ok {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok = extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a pointer; we can return sf.value directly.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, a pointer to a basic type, or a slice.
+ // Allocate a "field" to hold that pointer/slice itself and pass the
+ // address of the field to props.dec: a zero field plus a *t lets
+ // props.dec interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
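+
+// Illustrative round trip (a sketch; E_Foo and pb.Base are hypothetical
+// generated identifiers):
+//
+//	msg := &pb.Base{}
+//	if err := proto.SetExtension(msg, E_Foo, proto.String("bar")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, E_Foo) {
+//		v, _ := proto.GetExtension(msg, E_Foo) // interface{} holding a *string
+//		fmt.Println(*v.(*string))              // "bar"
+//	}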
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
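+
+// Sketch of the descriptor a generated file registers (pb.Base, the field
+// number and the names are hypothetical):
+//
+//	var E_Foo = &proto.ExtensionDesc{
+//		ExtendedType:  (*pb.Base)(nil),
+//		ExtensionType: (*string)(nil),
+//		Field:         123,
+//		Name:          "example.foo",
+//		Tag:           "bytes,123,opt,name=foo",
+//	}
+//
+//	func init() { proto.RegisterExtension(E_Foo) }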
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 000000000..1c225504a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,897 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
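+
+// Illustrative usage (a sketch; msg1 and msg2 are placeholder messages):
+// a Buffer may be reused to marshal several messages into one stream,
+// since Marshal appends to the existing contents.
+//
+//	buf := proto.NewBuffer(nil)
+//	if err := buf.Marshal(msg1); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := buf.Marshal(msg2); err != nil {
+//		log.Fatal(err)
+//	}
+//	wire := buf.Bytes() // msg1's encoding followed by msg2's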
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
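+
+// Illustrative usage (a sketch, reusing the FOO enum from the package
+// documentation):
+//
+//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // v == 17
+//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte("17"), "FOO")   // v == 17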
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
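+
+// Illustrative usage (a sketch; msg is a placeholder message):
+//
+//	data, _ := proto.Marshal(msg)
+//	new(proto.Buffer).DebugPrint("my message", data)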
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
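+
+// Illustrative usage (a sketch; pb.Test and its Type default of 77 come from
+// the package documentation above):
+//
+//	msg := &pb.Test{Label: proto.String("x")} // Type is unset
+//	proto.SetDefaults(msg)
+//	fmt.Println(*msg.Type) // 77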
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its defaultMessage,
+ // which records the scalar fields with proto-declared default values
+ // and the indices of nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
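+
+// Illustrative call (a sketch; v stands for a reflect.Value of map kind):
+//
+//	keys := v.MapKeys()
+//	sort.Sort(mapKeys(keys)) // numeric keys sort numerically, all others textually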
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 000000000..fd982decd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
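+
+// Illustrative wire layout (derived from the schema above) for one item with
+// type_id 100 and a two-byte embedded message 0x08 0x01:
+//
+//	0x0b        start group, field 1 (Item)
+//	0x10 0x64   varint, field 2 (type_id) == 100
+//	0x1a 0x02   bytes, field 3 (message), length 2
+//	0x08 0x01   the embedded message itself
+//	0x0c        end group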
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
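+
+// Example (illustrative): the bytes 0x96, 0x01 are the varint encoding of
+// 150, so skipVarint([]byte{0x96, 0x01, 0x08}) returns []byte{0x08}.
+// MarshalMessageSet below calls it twice to strip a tag varint and then a
+// length varint.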
+
+// MarshalMessageSet encodes the extension map represented by exts in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by exts in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ wroteAny := false
+ for _, id := range ids {
+ ext := m[id]
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ // Write the separator only once an entry has actually been rendered;
+ // writing it before the skip above would emit a dangling comma for
+ // unknown types and produce invalid JSON.
+ if wroteAny {
+ b.WriteByte(',')
+ }
+ wroteAny = true
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
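+
+// For a message-set extension message Foo (a hypothetical name) carried in
+// extension field 12345, the generated code would call, roughly:
+//
+//	RegisterMessageSetType((*Foo)(nil), 12345, "pkg.Foo")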
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 000000000..fb512e2e1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 000000000..6b5567d47
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
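+
+// Illustrative example: given
+//
+//	type T struct {
+//		A int32
+//		B *string
+//	}
+//
+// toField of the StructField for B yields B's byte offset (8 on 64-bit
+// platforms), and the accessors below just add that offset to the struct
+// pointer, so structPointer_String(p, f) is (**string)(p + 8).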
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 000000000..ec2289c00
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for deriving encoding and decoding properties
+ * from protocol buffer struct tags.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
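+
+// Illustrative usage: small tags live in the dense fastTags slice and larger
+// ones fall back to the map:
+//
+//	var tm tagMap
+//	tm.put(3, 7)        // stored in fastTags[3]
+//	fi, ok := tm.get(3) // fi == 7, ok == true
+//	tm.put(5000, 2)     // 5000 >= tagMapFastLimit, so stored in slowTags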
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
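+
+// Illustrative example: parsing the tag "bytes,3,opt,name=foo,json=fooBar"
+// sets p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 3,
+// p.Optional == true, p.OrigName == "foo" and p.JSONName == "fooBar".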
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %v = []%v\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %v -> %v -> %v\n", t1, t2, t3)
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %v -> %v -> %v\n", t1, t2, t2.Elem())
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for ; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
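+ // For example, Tag == 3 with WireBytes gives x == 3<<3|2 == 0x1a, so
+ // tagcode is the single byte 0x1a; tags of 16 and above no longer fit in
+ // seven bits and spill into multi-byte varints.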
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
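+
+// Illustrative usage, assuming a generated message type MyMsg in client code:
+//
+//	sprop := proto.GetProperties(reflect.TypeOf(MyMsg{}))
+//	for _, prop := range sprop.Prop {
+//		fmt.Println(prop.Name, prop.String())
+//	}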
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // Build the required-field count and the decoder tag/name maps.
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
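+
+// For an enum such as
+//
+//	enum Color { RED = 0; BLUE = 1; }
+//
+// the generated registration looks roughly like (names illustrative):
+//
+//	RegisterEnum("pkg.Color", map[int32]string{0: "RED", 1: "BLUE"},
+//		map[string]int32{"RED": 0, "BLUE": 1})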
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
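+
+// Generated code registers each message type roughly as (Foo illustrative):
+//
+//	RegisterType((*Foo)(nil), "pkg.Foo")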
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
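+
+// Illustrative sketch of how generated code uses these registries. MyMessage,
+// MyEnum, and the "pkg." names below are hypothetical placeholders:
+//
+//	func init() {
+//		proto.RegisterType((*MyMessage)(nil), "pkg.MyMessage")
+//		proto.RegisterEnum("pkg.MyEnum", nil, map[string]int32{"A": 0, "B": 1})
+//	}
+//
+// After registration, MessageType("pkg.MyMessage") returns the reflect.Type of
+// *MyMessage, and EnumValueMap("pkg.MyEnum") returns its name-to-value map.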
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 000000000..965876bf0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value in sv can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, ok := extendable(pv.Interface()); ok {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// isprint reports whether c is a printable ASCII character, equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
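+// For example, the byte 0x07 is written as \007 rather than Go's \a, and a
+// double quote is written as \".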
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
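+// int32Slice implements sort.Interface for a slice of int32s.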
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
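+
+// Illustrative usage sketch. msg stands for any value implementing Message:
+//
+//	var buf bytes.Buffer
+//	if err := proto.MarshalText(&buf, msg); err != nil {
+//		// only errors from the underlying writer are returned
+//	}
+//	oneLine := proto.CompactTextString(msg) // compact single-line form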
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 000000000..5e14513f2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,895 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// anyRepeatedlyUnpacked is the error format emitted when an Any message is
+// unpacked multiple times, or when one of its fields has already been set.
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
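+// A ParseError describes a failure to parse a text-format protocol buffer,
+// recording the line and byte offset at which parsing failed.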
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// back backs off the parser by one token. It may only be called between calls
+// to next(), and it makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// next advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// missingRequiredFieldError returns a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// structFieldByName returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes an extension name or an expanded Any type URL, along
+// with the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+	return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
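+
+// Illustrative round trip through the text format. pb.MyMessage stands for a
+// hypothetical generated message type registered with this package:
+//
+//	s := proto.MarshalTextString(msg)
+//	var out pb.MyMessage
+//	if err := proto.UnmarshalText(s, &out); err != nil {
+//		// err may be a *ParseError or a *RequiredNotSetError
+//	}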
diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
new file mode 100644
index 000000000..64869af34
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/README.md
@@ -0,0 +1,71 @@
+gofuzz
+======
+
+gofuzz is a library for populating Go objects with random values.
+
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
+[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/deserialize correctly in all cases? (See the round-trip sketch near the end of this README.)
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "github.com/google/gofuzz"```
+
+You can use it on single variables:
+```go
+f := fuzz.New()
+var myInt int
+f.Fuzz(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```go
+f := fuzz.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fuzz(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```go
+f := fuzz.New().NilChance(.5)
+var fancyStruct struct {
+ A, B, C, D *string
+}
+f.Fuzz(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```go
+type MyEnum string
+const (
+ A MyEnum = "A"
+ B MyEnum = "B"
+)
+type MyInfo struct {
+ Type MyEnum
+ AInfo *string
+ BInfo *string
+}
+
+f := fuzz.New().NilChance(0).Funcs(
+ func(e *MyInfo, c fuzz.Continue) {
+ switch c.Intn(2) {
+ case 0:
+ e.Type = A
+ c.Fuzz(&e.AInfo)
+ case 1:
+ e.Type = B
+ c.Fuzz(&e.BInfo)
+ }
+ },
+)
+
+var myObject MyInfo
+f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
+```
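+
+As a sketch of the round-trip testing idea above (assuming a hypothetical
+JSON-serializable `MyType`, inside a standard Go test with `encoding/json`
+and `reflect` imported):
+```go
+f := fuzz.New()
+for i := 0; i < 100; i++ {
+	var in, out MyType
+	f.Fuzz(&in) // fill in with random values
+	b, err := json.Marshal(in)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := json.Unmarshal(b, &out); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(in, out) {
+		t.Errorf("round trip mismatch: %+v != %+v", in, out)
+	}
+}
+```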
+
+See more examples in ```example_test.go```.
+
+Happy testing!
diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go
new file mode 100644
index 000000000..9f9956d4a
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fuzz is a library for populating Go objects with random values.
+package fuzz
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
new file mode 100644
index 000000000..4f888fbc8
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/fuzz.go
@@ -0,0 +1,453 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzz
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "time"
+)
+
+// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
+type fuzzFuncMap map[reflect.Type]reflect.Value
+
+// Fuzzer knows how to fill any object with random fields.
+type Fuzzer struct {
+ fuzzFuncs fuzzFuncMap
+ defaultFuzzFuncs fuzzFuncMap
+ r *rand.Rand
+ nilChance float64
+ minElements int
+ maxElements int
+}
+
+// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Fuzzer {
+ f := &Fuzzer{
+ defaultFuzzFuncs: fuzzFuncMap{
+ reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
+ },
+
+ fuzzFuncs: fuzzFuncMap{},
+ r: rand.New(rand.NewSource(time.Now().UnixNano())),
+ nilChance: .2,
+ minElements: 1,
+ maxElements: 10,
+ }
+ return f
+}
+
+// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
+//
+// Each entry in fuzzFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// fuzz.Continue, which will provide a source of randomness and a way
+// to automatically continue fuzzing smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChance option. For slices, it
+// doesn't make much sense to pre-create them--Fuzzer doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
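+//
+// A minimal illustrative sketch of a custom string fuzzer:
+//
+//	f := fuzz.New().Funcs(
+//		func(s *string, c fuzz.Continue) {
+//			*s = "pfx-" + c.RandString() // give every string a recognizable prefix
+//		},
+//	)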
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
+ for i := range fuzzFuncs {
+ v := reflect.ValueOf(fuzzFuncs[i])
+ if v.Kind() != reflect.Func {
+ panic("Need only funcs!")
+ }
+ t := v.Type()
+ if t.NumIn() != 2 || t.NumOut() != 0 {
+ panic("Need 2 in and 0 out params!")
+ }
+ argT := t.In(0)
+ switch argT.Kind() {
+ case reflect.Ptr, reflect.Map:
+ default:
+ panic("fuzzFunc must take pointer or map type")
+ }
+ if t.In(1) != reflect.TypeOf(Continue{}) {
+ panic("fuzzFunc's second parameter must be type fuzz.Continue")
+ }
+ f.fuzzFuncs[argT] = v
+ }
+ return f
+}
+
+// RandSource causes f to get values from the given source of randomness.
+// Use if you want deterministic fuzzing.
+func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
+ f.r = rand.New(s)
+ return f
+}
+
+// NilChance sets the probability of creating a nil pointer, map, or slice to
+// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
+func (f *Fuzzer) NilChance(p float64) *Fuzzer {
+ if p < 0 || p > 1 {
+ panic("p should be between 0 and 1, inclusive.")
+ }
+ f.nilChance = p
+ return f
+}
+
+// NumElements sets the minimum and maximum number of elements that will be
+// added to a non-nil map or slice.
+func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
+ if atLeast > atMost {
+ panic("atLeast must be <= atMost")
+ }
+ if atLeast < 0 {
+ panic("atLeast must be >= 0")
+ }
+ f.minElements = atLeast
+ f.maxElements = atMost
+ return f
+}
+
+func (f *Fuzzer) genElementCount() int {
+ if f.minElements == f.maxElements {
+ return f.minElements
+ }
+ return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
+}
+
+func (f *Fuzzer) genShouldFill() bool {
+ return f.r.Float64() > f.nilChance
+}
+
+// Fuzz recursively fills all of obj's fields with something random. First
+// this tries to find a custom fuzz function (see Funcs). If there is no
+// custom function this tests whether the object implements fuzz.Interface and,
+// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
+// there is a default fuzz function provided by this package. If all of that
+// fails, this will generate random values for all primitive fields and then
+// recurse for all non-primitives.
+//
+// Not safe for cyclic or tree-like structs!
+//
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.doFuzz(v, 0)
+}
+
+// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+// Not safe for cyclic or tree-like structs!
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.doFuzz(v, flagNoCustomFuzz)
+}
+
+const (
+ // Do not try to find a custom fuzz function. Does not apply recursively.
+ flagNoCustomFuzz uint64 = 1 << iota
+)
+
+func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
+ if !v.CanSet() {
+ return
+ }
+
+ if flags&flagNoCustomFuzz == 0 {
+ // Check for both pointer and non-pointer custom functions.
+ if v.CanAddr() && f.tryCustom(v.Addr()) {
+ return
+ }
+ if f.tryCustom(v) {
+ return
+ }
+ }
+
+ if fn, ok := fillFuncMap[v.Kind()]; ok {
+ fn(v, f.r)
+ return
+ }
+ switch v.Kind() {
+ case reflect.Map:
+ if f.genShouldFill() {
+ v.Set(reflect.MakeMap(v.Type()))
+ n := f.genElementCount()
+ for i := 0; i < n; i++ {
+ key := reflect.New(v.Type().Key()).Elem()
+ f.doFuzz(key, 0)
+ val := reflect.New(v.Type().Elem()).Elem()
+ f.doFuzz(val, 0)
+ v.SetMapIndex(key, val)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Ptr:
+ if f.genShouldFill() {
+ v.Set(reflect.New(v.Type().Elem()))
+ f.doFuzz(v.Elem(), 0)
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Slice:
+ if f.genShouldFill() {
+ n := f.genElementCount()
+ v.Set(reflect.MakeSlice(v.Type(), n, n))
+ for i := 0; i < n; i++ {
+ f.doFuzz(v.Index(i), 0)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Array:
+ if f.genShouldFill() {
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ f.doFuzz(v.Index(i), 0)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ f.doFuzz(v.Field(i), 0)
+ }
+ case reflect.Chan:
+ fallthrough
+ case reflect.Func:
+ fallthrough
+ case reflect.Interface:
+ fallthrough
+ default:
+ panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
+ }
+}
+
+// tryCustom searches for custom handlers, and returns true iff it finds a match
+// and successfully randomizes v.
+func (f *Fuzzer) tryCustom(v reflect.Value) bool {
+ // First: see if we have a fuzz function for it.
+ doCustom, ok := f.fuzzFuncs[v.Type()]
+ if !ok {
+ // Second: see if it can fuzz itself.
+ if v.CanInterface() {
+ intf := v.Interface()
+ if fuzzable, ok := intf.(Interface); ok {
+ fuzzable.Fuzz(Continue{f: f, Rand: f.r})
+ return true
+ }
+ }
+ // Finally: see if there is a default fuzz function.
+ doCustom, ok = f.defaultFuzzFuncs[v.Type()]
+ if !ok {
+ return false
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Ptr:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ case reflect.Map:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ default:
+ return false
+ }
+
+ doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
+ f: f,
+ Rand: f.r,
+ })})
+ return true
+}
+
+// Interface represents an object that knows how to fuzz itself. Any time we
+// find a type that implements this interface we will delegate the act of
+// fuzzing to it.
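+//
+// A minimal illustrative sketch (the Weekday type is hypothetical):
+//
+//	type Weekday int
+//
+//	func (d *Weekday) Fuzz(c fuzz.Continue) { *d = Weekday(c.Intn(7)) } // keep values in range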
+type Interface interface {
+ Fuzz(c Continue)
+}
+
+// Continue can be passed to custom fuzzing functions to allow them to use
+// the correct source of randomness and to continue fuzzing their members.
+type Continue struct {
+ f *Fuzzer
+
+ // For convenience, Continue implements rand.Rand via embedding.
+ // Use this for generating any randomness if you want your fuzzing
+ // to be repeatable for a given seed.
+ *rand.Rand
+}
+
+// Fuzz continues fuzzing obj. obj must be a pointer.
+func (c Continue) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.f.doFuzz(v, 0)
+}
+
+// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+func (c Continue) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.f.doFuzz(v, flagNoCustomFuzz)
+}
+
+// RandString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func (c Continue) RandString() string {
+ return randString(c.Rand)
+}
+
+// RandUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func (c Continue) RandUint64() uint64 {
+ return randUint64(c.Rand)
+}
+
+// RandBool returns true or false randomly.
+func (c Continue) RandBool() bool {
+ return randBool(c.Rand)
+}
+
+func fuzzInt(v reflect.Value, r *rand.Rand) {
+ v.SetInt(int64(randUint64(r)))
+}
+
+func fuzzUint(v reflect.Value, r *rand.Rand) {
+ v.SetUint(randUint64(r))
+}
+
+func fuzzTime(t *time.Time, c Continue) {
+ var sec, nsec int64
+ // Allow for about 1000 years of random time values, which keeps things
+ // like JSON parsing reasonably happy.
+ sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+ c.Fuzz(&nsec)
+ *t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+ reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+ v.SetBool(randBool(r))
+ },
+ reflect.Int: fuzzInt,
+ reflect.Int8: fuzzInt,
+ reflect.Int16: fuzzInt,
+ reflect.Int32: fuzzInt,
+ reflect.Int64: fuzzInt,
+ reflect.Uint: fuzzUint,
+ reflect.Uint8: fuzzUint,
+ reflect.Uint16: fuzzUint,
+ reflect.Uint32: fuzzUint,
+ reflect.Uint64: fuzzUint,
+ reflect.Uintptr: fuzzUint,
+ reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(float64(r.Float32()))
+ },
+ reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(r.Float64())
+ },
+ reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+ reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+ reflect.String: func(v reflect.Value, r *rand.Rand) {
+ v.SetString(randString(r))
+ },
+ reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+ if r.Int()&1 == 1 {
+ return true
+ }
+ return false
+}
+
+type charRange struct {
+ first, last rune
+}
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (r *charRange) choose(rand *rand.Rand) rune {
+ count := int64(r.last - r.first)
+ return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+ {' ', '~'}, // ASCII characters
+ {'\u00a0', '\u02af'}, // Multi-byte encoded characters
+ {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand) string {
+ n := r.Intn(20)
+ runes := make([]rune, n)
+ for i := range runes {
+ runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
+ }
+ return string(runes)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+ return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE
new file mode 100644
index 000000000..0e5fb8728
--- /dev/null
+++ b/vendor/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
new file mode 100644
index 000000000..c60a31b05
--- /dev/null
+++ b/vendor/github.com/gorilla/context/README.md
@@ -0,0 +1,7 @@
+context
+=======
+[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
+
+gorilla/context is a general-purpose registry for global request variables.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
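+
+A minimal usage sketch (the key type and handler below are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/gorilla/context"
+)
+
+type key int
+
+const myKey key = 0
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	context.Set(r, myKey, "bar")           // store a request-scoped value
+	fmt.Fprintln(w, context.Get(r, myKey)) // read it back: "bar"
+}
+
+func main() {
+	// ClearHandler removes stored values when each request ends.
+	log.Fatal(http.ListenAndServe(":8080", context.ClearHandler(http.HandlerFunc(handler))))
+}
+```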
diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go
new file mode 100644
index 000000000..81cb128b1
--- /dev/null
+++ b/vendor/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "net/http"
+ "sync"
+ "time"
+)
+
+var (
+ mutex sync.RWMutex
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+ mutex.Lock()
+ if data[r] == nil {
+ data[r] = make(map[interface{}]interface{})
+ datat[r] = time.Now().Unix()
+ }
+ data[r][key] = val
+ mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+ mutex.RLock()
+ if ctx := data[r]; ctx != nil {
+ value := ctx[key]
+ mutex.RUnlock()
+ return value
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetOk returns the stored value and a presence flag, like the two-value form of a map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+ mutex.RLock()
+ if _, ok := data[r]; ok {
+ value, ok := data[r][key]
+ mutex.RUnlock()
+ return value, ok
+ }
+ mutex.RUnlock()
+ return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+ mutex.RLock()
+ if context, ok := data[r]; ok {
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+ mutex.RLock()
+ context, ok := data[r]
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+ mutex.Lock()
+ if data[r] != nil {
+ delete(data[r], key)
+ }
+ mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+ mutex.Lock()
+ clear(r)
+ mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+ delete(data, r)
+ delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used as a sanity check: if context cleaning was not properly
+// set up, some request data can be kept forever, consuming an increasing
+// amount of memory. If this is detected, Purge() must be called
+// periodically until the problem is fixed.
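+//
+// For example (illustrative):
+//
+//	removed := context.Purge(3600) // drop request data older than one hour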
+func Purge(maxAge int) int {
+ mutex.Lock()
+ count := 0
+ if maxAge <= 0 {
+ count = len(data)
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+ } else {
+ min := time.Now().Unix() - int64(maxAge)
+ for r := range data {
+ if datat[r] < min {
+ clear(r)
+ count++
+ }
+ }
+ }
+ mutex.Unlock()
+ return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer Clear(r)
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
new file mode 100644
index 000000000..73c740031
--- /dev/null
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+sessions values to be saved at the end of a request. There are several
+others common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+ http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+ package foo
+
+ import (
+ "github.com/gorilla/context"
+ )
+
+ type key int
+
+ const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+ context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+ func MyHandler(w http.ResponseWriter, r *http.Request) {
+ // val is "bar".
+ val := context.Get(r, foo.MyKey)
+
+ // returns ("bar", true)
+ val, ok := context.GetOk(r, foo.MyKey)
+ // ...
+ }
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+ type key int
+
+ const mykey key = 0
+
+ // GetMyKey returns a value for this package from the request values.
+ func GetMyKey(r *http.Request) SomeType {
+ if rv := context.Get(r, mykey); rv != nil {
+ return rv.(SomeType)
+ }
+ return nil
+ }
+
+ // SetMyKey sets a value for this package in the request values.
+ func SetMyKey(r *http.Request, val SomeType) {
+ context.Set(r, mykey, val)
+ }
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+ context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear(),
+so if you are using either of them you don't need to clear the context manually.
+*/
+package context
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
new file mode 100644
index 000000000..0e5fb8728
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 000000000..94d396ca4
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,339 @@
+gorilla/mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+
+![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
+
+http://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts and paths can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintain references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved by calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method. This is useful for generating documentation:
+
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ return
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.HandleFunc("/products", handler)
+ r.HandleFunc("/articles", handler)
+ r.HandleFunc("/articles/{id}", handler)
+ r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+ t, err := route.GetPathTemplate()
+ if err != nil {
+ return err
+ }
+ fmt.Println(t)
+ return nil
+ })
+ http.Handle("/", r)
+}
+```
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+ var dir string
+
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
+
+ // This will serve files under http://localhost:8000/static/<filename>
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name by calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
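+
+A minimal illustrative sketch of such a build-only route (the path and name are arbitrary; check your mux version for the `BuildOnly()` method):
+
+```go
+// Usable for URL building, but never matched by incoming requests.
+r.NewRoute().Path("/reports/{id:[0-9]+}").BuildOnly().Name("report")
+url, err := r.Get("report").URL("id", "7") // "/reports/7"
+```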
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match requests with a Content-Type of either `application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+ "net/http"
+ "log"
+ "github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+ r := mux.NewRouter()
+ // Routes consist of a path and a handler function.
+ r.HandleFunc("/", YourHandler)
+
+ // Bind to a port and pass our router in
+ log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 000000000..d7adaa8fa
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+package mux
+
+import (
+ "net/http"
+
+ "github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+ return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+ if val == nil {
+ return r
+ }
+
+ context.Set(r, key, val)
+ return r
+}
+
+func contextClear(r *http.Request) {
+ context.Clear(r)
+}
diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 000000000..209cbea7d
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+ "context"
+ "net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+ return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+ if val == nil {
+ return r
+ }
+
+ return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
+func contextClear(r *http.Request) {
+}
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 000000000..00daf4a72
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+ * Requests can be matched based on URL host, path, path prefix, schemes,
+ header and query values, HTTP methods or using custom matchers.
+ * URL hosts and paths can have variables with an optional regular
+ expression.
+  * Registered URLs can be built, or "reversed", which helps maintain
+ references to resources.
+ * Routes can be used as subrouters: nested routes are only tested if the
+ parent route matches. This is useful to define groups of routes that
+ share common conditions like a host, a path prefix or other repeated
+ attributes. As a bonus, this optimizes request matching.
+ * It implements the http.Handler interface so it is compatible with the
+ standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+ func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+ }
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/products/{key}", ProductHandler)
+ r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+ r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+by calling mux.Vars():
+
+ vars := mux.Vars(request)
+ category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+ r := mux.NewRouter()
+ // Only matches if domain is "www.example.com".
+ r.Host("www.example.com")
+ // Matches a dynamic subdomain.
+ r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+ r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+ r.Methods("GET", "POST")
+
+...or URL schemes:
+
+ r.Schemes("https")
+
+...or header values:
+
+ r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+ r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+ r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+ })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+ r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+ r := mux.NewRouter()
+ s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+ s.HandleFunc("/products/", ProductsHandler)
+ s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+ r := mux.NewRouter()
+ s := r.PathPrefix("/products").Subrouter()
+ // "/products/"
+ s.HandleFunc("/", ProductsHandler)
+ // "/products/{key}/"
+ s.HandleFunc("/{key}/", ProductHandler)
+ // "/products/{key}/details"
+ s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+ func main() {
+ var dir string
+
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
+
+ // This will serve files under http://localhost:8000/static/<filename>
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+ }
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+ url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+ "/articles/technology/42"
+
+This also works for host variables:
+
+ r := mux.NewRouter()
+ r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // url.String() will be "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+ r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of either
+`application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+ // "http://news.domain.com/"
+ host, err := r.Get("article").URLHost("subdomain", "news")
+
+ // "/articles/technology/42"
+ path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+ r := mux.NewRouter()
+ s := r.Host("{subdomain}.domain.com").Subrouter()
+ s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 000000000..d66ec3841
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,542 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "path"
+ "regexp"
+ "strings"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+ return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+// var router = mux.NewRouter()
+//
+// func main() {
+// http.Handle("/", router)
+// }
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+// func init() {
+// http.Handle("/", router)
+// }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+ // Configurable Handler to be used when no route matches.
+ NotFoundHandler http.Handler
+ // Parent route, if this is a subrouter.
+ parent parentRoute
+ // Routes to be matched, in order.
+ routes []*Route
+ // Routes by name for URL building.
+ namedRoutes map[string]*Route
+ // See Router.StrictSlash(). This defines the flag for new routes.
+ strictSlash bool
+ // See Router.SkipClean(). This defines the flag for new routes.
+ skipClean bool
+ // If true, do not clear the request context after handling the request.
+ // This has no effect when go1.7+ is used, since the context is stored
+ // on the request itself.
+ KeepContext bool
+ // see Router.UseEncodedPath(). This defines a flag for all routes.
+ useEncodedPath bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+ for _, route := range r.routes {
+ if route.Match(req, match) {
+ return true
+ }
+ }
+
+ // Closest match for a router (includes sub-routers)
+ if r.NotFoundHandler != nil {
+ match.Handler = r.NotFoundHandler
+ return true
+ }
+ return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved by calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if !r.skipClean {
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = getPath(req)
+ }
+ // Clean path to canonical form and redirect.
+ if p := cleanPath(path); p != path {
+
+ // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+ // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
+ // http://code.google.com/p/go/issues/detail?id=5252
+ url := *req.URL
+ url.Path = p
+ p = url.String()
+
+ w.Header().Set("Location", p)
+ w.WriteHeader(http.StatusMovedPermanently)
+ return
+ }
+ }
+ var match RouteMatch
+ var handler http.Handler
+ if r.Match(req, &match) {
+ handler = match.Handler
+ req = setVars(req, match.Vars)
+ req = setCurrentRoute(req, match.Route)
+ }
+ if handler == nil {
+ handler = http.NotFoundHandler()
+ }
+ if !r.KeepContext {
+ defer contextClear(req)
+ }
+ handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
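+//
+// A minimal illustrative sketch (handler is assumed to be defined elsewhere):
+//
+//	r := mux.NewRouter().StrictSlash(true)
+//	r.HandleFunc("/path/", handler) // a request for "/path" now redirects to "/path/"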
+func (r *Router) StrictSlash(value bool) *Router {
+ r.strictSlash = value
+ return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+ r.skipClean = value
+ return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
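+//
+// A minimal illustrative sketch (handler is assumed to be defined elsewhere):
+//
+//	r := mux.NewRouter().UseEncodedPath()
+//	r.HandleFunc("/path/{var}/to", handler) // matches "/path/foo%2Fbar/to" with var="foo%2Fbar"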
+func (r *Router) UseEncodedPath() *Router {
+ r.useEncodedPath = true
+ return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+ if r.namedRoutes == nil {
+ if r.parent != nil {
+ r.namedRoutes = r.parent.getNamedRoutes()
+ } else {
+ r.namedRoutes = make(map[string]*Route)
+ }
+ }
+ return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+ if r.parent != nil {
+ return r.parent.getRegexpGroup()
+ }
+ return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+ if r.parent != nil {
+ m = r.parent.buildVars(m)
+ }
+ return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+ route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+ r.routes = append(r.routes, route)
+ return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+ return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+ *http.Request)) *Route {
+ return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+ return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+ return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+ return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+ return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+ return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+ return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+ return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+ return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+ return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+ return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+ for _, t := range r.routes {
+ if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+ continue
+ }
+
+ err := walkFn(t, r, ancestors)
+ if err == SkipRouter {
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ for _, sr := range t.matchers {
+ if h, ok := sr.(*Router); ok {
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if h, ok := t.handler.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+ Route *Route
+ Handler http.Handler
+ Vars map[string]string
+}
+
+type contextKey int
+
+const (
+ varsKey contextKey = iota
+ routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+ if rv := contextGet(r, varsKey); rv != nil {
+ return rv.(map[string]string)
+ }
+ return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns, unless the KeepContext option is set on the
+// Router.
+func CurrentRoute(r *http.Request) *Route {
+ if rv := contextGet(r, routeKey); rv != nil {
+ return rv.(*Route)
+ }
+ return nil
+}
+
+func setVars(r *http.Request, val interface{}) *http.Request {
+ return contextSet(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
+ return contextSet(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// getPath returns the escaped path if possible, mirroring what
+// URL.EscapedPath() (added in go1.5) does.
+func getPath(req *http.Request) string {
+ if req.RequestURI != "" {
+ // Extract the path from RequestURI (which is escaped, unlike URL.Path)
+ // as detailed in https://golang.org/pkg/net/url/#URL; this is a
+ // server-side workaround for Go < 1.5.
+ // http://localhost/path/here?v=1 -> /path/here
+ path := req.RequestURI
+ path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+ path = strings.TrimPrefix(path, req.URL.Host)
+ if i := strings.LastIndex(path, "?"); i > -1 {
+ path = path[:i]
+ }
+ if i := strings.LastIndex(path, "#"); i > -1 {
+ path = path[:i]
+ }
+ return path
+ }
+ return req.URL.Path
+}
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+
+ return np
+}
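
Since the trailing-slash handling is the only difference from path.Clean, the behavior can be sketched with just the standard library (the sample path is arbitrary):

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	p := "/a/b/../c/"
	np := path.Clean(p) // "/a/c": Clean drops the trailing slash
	if p[len(p)-1] == '/' && np != "/" {
		np += "/" // restore it, as cleanPath does
	}
	fmt.Println(np) // "/a/c/"
}
```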
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+ for _, v1 := range s1 {
+ for _, v2 := range s2 {
+ if v1 == v2 {
+ return fmt.Errorf("mux: duplicated route variable %q", v2)
+ }
+ }
+ }
+ return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+ length := len(pairs)
+ if length%2 != 0 {
+ return length, fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ }
+ return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]string, length/2)
+ for i := 0; i < length; i += 2 {
+ m[pairs[i]] = pairs[i+1]
+ }
+ return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]*regexp.Regexp, length/2)
+ for i := 0; i < length; i += 2 {
+ regex, err := regexp.Compile(pairs[i+1])
+ if err != nil {
+ return nil, err
+ }
+ m[pairs[i]] = regex
+ }
+ return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+ for _, v := range arr {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != "" {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v == value {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given
+// map, with each value matched against the corresponding regexp.
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != nil {
+ // If the regexp was defined as nil we only check that the key
+ // exists. Otherwise we also match the value against the regexp.
+ valueExists := false
+ for _, value := range values {
+ if v.MatchString(value) {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 000000000..0189ad346
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,323 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+ // Check if it is well-formed.
+ idxs, errBraces := braceIndices(tpl)
+ if errBraces != nil {
+ return nil, errBraces
+ }
+ // Backup the original.
+ template := tpl
+ // Now let's parse it.
+ defaultPattern := "[^/]+"
+ if matchQuery {
+ defaultPattern = "[^?&]*"
+ } else if matchHost {
+ defaultPattern = "[^.]+"
+ matchPrefix = false
+ }
+ // Only apply strictSlash to plain path matches (not prefix, host or
+ // query matches).
+ if matchPrefix || matchHost || matchQuery {
+ strictSlash = false
+ }
+ // Set a flag for strictSlash.
+ endSlash := false
+ if strictSlash && strings.HasSuffix(tpl, "/") {
+ tpl = tpl[:len(tpl)-1]
+ endSlash = true
+ }
+ varsN := make([]string, len(idxs)/2)
+ varsR := make([]*regexp.Regexp, len(idxs)/2)
+ pattern := bytes.NewBufferString("")
+ pattern.WriteByte('^')
+ reverse := bytes.NewBufferString("")
+ var end int
+ var err error
+ for i := 0; i < len(idxs); i += 2 {
+ // Set all values we are interested in.
+ raw := tpl[end:idxs[i]]
+ end = idxs[i+1]
+ parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+ name := parts[0]
+ patt := defaultPattern
+ if len(parts) == 2 {
+ patt = parts[1]
+ }
+ // Name or pattern can't be empty.
+ if name == "" || patt == "" {
+ return nil, fmt.Errorf("mux: missing name or pattern in %q",
+ tpl[idxs[i]:end])
+ }
+ // Build the regexp pattern.
+ fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+ // Build the reverse template.
+ fmt.Fprintf(reverse, "%s%%s", raw)
+
+ // Append variable name and compiled pattern.
+ varsN[i/2] = name
+ varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Add the remaining.
+ raw := tpl[end:]
+ pattern.WriteString(regexp.QuoteMeta(raw))
+ if strictSlash {
+ pattern.WriteString("[/]?")
+ }
+ if matchQuery {
+ // Add the default pattern if the query value is empty
+ if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+ pattern.WriteString(defaultPattern)
+ }
+ }
+ if !matchPrefix {
+ pattern.WriteByte('$')
+ }
+ reverse.WriteString(raw)
+ if endSlash {
+ reverse.WriteByte('/')
+ }
+ // Compile full regexp.
+ reg, errCompile := regexp.Compile(pattern.String())
+ if errCompile != nil {
+ return nil, errCompile
+ }
+
+ // Check for capturing groups which used to work in older versions
+ if reg.NumSubexp() != len(idxs)/2 {
+ panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+ "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+ }
+
+ // Done!
+ return &routeRegexp{
+ template: template,
+ matchHost: matchHost,
+ matchQuery: matchQuery,
+ strictSlash: strictSlash,
+ useEncodedPath: useEncodedPath,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
+ }, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+ // The unmodified template.
+ template string
+ // True for host match, false for path or query string match.
+ matchHost bool
+ // True for query string match, false for path and host match.
+ matchQuery bool
+ // The strictSlash value defined on the route, but disabled if PathPrefix was used.
+ strictSlash bool
+ // Determines whether to use the encoded path from the getPath function,
+ // or the unencoded req.URL.Path, for path matching.
+ useEncodedPath bool
+ // Expanded regexp.
+ regexp *regexp.Regexp
+ // Reverse template.
+ reverse string
+ // Variable names.
+ varsN []string
+ // Variable regexps (validators).
+ varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+ if !r.matchHost {
+ if r.matchQuery {
+ return r.matchQueryString(req)
+ }
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = getPath(req)
+ }
+ return r.regexp.MatchString(path)
+ }
+
+ return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+ urlValues := make([]interface{}, len(r.varsN))
+ for k, v := range r.varsN {
+ value, ok := values[v]
+ if !ok {
+ return "", fmt.Errorf("mux: missing route variable %q", v)
+ }
+ urlValues[k] = value
+ }
+ rv := fmt.Sprintf(r.reverse, urlValues...)
+ if !r.regexp.MatchString(rv) {
+ // The URL is checked against the full regexp, instead of checking
+ // individual variables. This is faster but to provide a good error
+ // message, we check individual regexps if the URL doesn't match.
+ for k, v := range r.varsN {
+ if !r.varsR[k].MatchString(values[v]) {
+ return "", fmt.Errorf(
+ "mux: variable %q doesn't match, expected %q", values[v],
+ r.varsR[k].String())
+ }
+ }
+ }
+ return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+ if !r.matchQuery {
+ return ""
+ }
+ templateKey := strings.SplitN(r.template, "=", 2)[0]
+ for key, vals := range req.URL.Query() {
+ if key == templateKey && len(vals) > 0 {
+ return key + "=" + vals[0]
+ }
+ }
+ return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+ return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+ var level, idx int
+ var idxs []int
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '{':
+ if level++; level == 1 {
+ idx = i
+ }
+ case '}':
+ if level--; level == 0 {
+ idxs = append(idxs, idx, i+1)
+ } else if level < 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ }
+ }
+ if level != 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ return idxs, nil
+}
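
braceIndices is unexported, but its error path is visible through the public API; a small sketch (the template is chosen arbitrarily) of how a malformed template surfaces:

```go
package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// The unbalanced brace is detected by braceIndices during parsing.
	route := r.Path("/articles/{id")
	fmt.Println(route.GetError()) // mux: unbalanced braces in "/articles/{id"
}
```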
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+ return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+ host *routeRegexp
+ path *routeRegexp
+ queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+ // Store host variables.
+ if v.host != nil {
+ host := getHost(req)
+ matches := v.host.regexp.FindStringSubmatchIndex(host)
+ if len(matches) > 0 {
+ extractVars(host, matches, v.host.varsN, m.Vars)
+ }
+ }
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = getPath(req)
+ }
+ // Store path variables.
+ if v.path != nil {
+ matches := v.path.regexp.FindStringSubmatchIndex(path)
+ if len(matches) > 0 {
+ extractVars(path, matches, v.path.varsN, m.Vars)
+ // Check if we should redirect.
+ if v.path.strictSlash {
+ p1 := strings.HasSuffix(path, "/")
+ p2 := strings.HasSuffix(v.path.template, "/")
+ if p1 != p2 {
+ u, _ := url.Parse(req.URL.String())
+ if p1 {
+ u.Path = u.Path[:len(u.Path)-1]
+ } else {
+ u.Path += "/"
+ }
+ m.Handler = http.RedirectHandler(u.String(), 301)
+ }
+ }
+ }
+ }
+ // Store query string variables.
+ for _, q := range v.queries {
+ queryURL := q.getURLQuery(req)
+ matches := q.regexp.FindStringSubmatchIndex(queryURL)
+ if len(matches) > 0 {
+ extractVars(queryURL, matches, q.varsN, m.Vars)
+ }
+ }
+}
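
A sketch of the redirect this block produces when StrictSlash is enabled on the router (path and port are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter().StrictSlash(true)
	r.HandleFunc("/path/", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	// A request for "/path" now receives a 301 redirect to "/path/"
	// (issued by setMatch above) instead of a 404.
	http.ListenAndServe(":8080", r)
}
```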
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+ if r.URL.IsAbs() {
+ return r.URL.Host
+ }
+ host := r.Host
+ // Slice off any port information.
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ return host
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+ for i, name := range names {
+ output[name] = input[matches[2*i+2]:matches[2*i+3]]
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 000000000..922191592
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,636 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+ // Parent where the route was registered (a Router).
+ parent parentRoute
+ // Request handler for the route.
+ handler http.Handler
+ // List of matchers.
+ matchers []matcher
+ // Manager for the variables from host and path.
+ regexp *routeRegexpGroup
+ // If true, when the path pattern is "/path/", accessing "/path" will
+ // redirect to the former and vice versa.
+ strictSlash bool
+ // If true, when the path pattern is "/path//to", accessing "/path//to"
+ // will not redirect
+ skipClean bool
+ // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+ useEncodedPath bool
+ // If true, this route never matches: it is only used to build URLs.
+ buildOnly bool
+ // The name used to build URLs.
+ name string
+ // Error resulting from building a route.
+ err error
+
+ buildVarsFunc BuildVarsFunc
+}
+
+// SkipClean reports whether path cleaning is disabled for this route.
+func (r *Route) SkipClean() bool {
+ return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+ if r.buildOnly || r.err != nil {
+ return false
+ }
+ // Match everything.
+ for _, m := range r.matchers {
+ if matched := m.Match(req, match); !matched {
+ return false
+ }
+ }
+ // Yay, we have a match. Let's collect some info about it.
+ if match.Route == nil {
+ match.Route = r
+ }
+ if match.Handler == nil {
+ match.Handler = r.handler
+ }
+ if match.Vars == nil {
+ match.Vars = make(map[string]string)
+ }
+ // Set variables.
+ if r.regexp != nil {
+ r.regexp.setMatch(req, match, r)
+ }
+ return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulting from building the route, if any.
+func (r *Route) GetError() error {
+ return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+ r.buildOnly = true
+ return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+ if r.err == nil {
+ r.handler = handler
+ }
+ return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+ return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+ return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+ if r.name != "" {
+ r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+ r.name, name)
+ }
+ if r.err == nil {
+ r.name = name
+ r.getNamedRoutes()[name] = r
+ }
+ return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+ return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+ Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+ if r.err == nil {
+ r.matchers = append(r.matchers, m)
+ }
+ return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+ if r.err != nil {
+ return r.err
+ }
+ r.regexp = r.getRegexpGroup()
+ if !matchHost && !matchQuery {
+ if tpl == "/" && (len(tpl) == 0 || tpl[0] != '/') {
+ return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+ }
+ if r.regexp.path != nil {
+ tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+ }
+ }
+ rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+ if err != nil {
+ return err
+ }
+ for _, q := range r.regexp.queries {
+ if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+ return err
+ }
+ }
+ if matchHost {
+ if r.regexp.path != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+ return err
+ }
+ }
+ r.regexp.host = rr
+ } else {
+ if r.regexp.host != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+ return err
+ }
+ }
+ if matchQuery {
+ r.regexp.queries = append(r.regexp.queries, rr)
+ } else {
+ r.regexp.path = rr
+ }
+ }
+ r.addMatcher(rr)
+ return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+// r := mux.NewRouter()
+// r.Headers("Content-Type", "application/json",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]string
+ headers, r.err = mapFromPairsToString(pairs...)
+ return r.addMatcher(headerMatcher(headers))
+ }
+ return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+// r := mux.NewRouter()
+// r.HeadersRegexp("Content-Type", "application/(text|json)",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request headers match their
+// respective regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]*regexp.Regexp
+ headers, r.err = mapFromPairsToRegex(pairs...)
+ return r.addMatcher(headerRegexMatcher(headers))
+ }
+ return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Host("www.example.com")
+// r.Host("{subdomain}.domain.com")
+// r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, true, false, false)
+ return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+ return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as a request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+ return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+ for k, v := range methods {
+ methods[k] = strings.ToUpper(v)
+ }
+ return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Path("/products/").Handler(ProductsHandler)
+// r.Path("/products/{key}").Handler(ProductsHandler)
+// r.Path("/articles/{category}/{id:[0-9]+}").
+// Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, false, false, false)
+ return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, false, true, false)
+ return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+// r := mux.NewRouter()
+// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next '&' (the default pattern for
+// query values is "[^?&]*").
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+ length := len(pairs)
+ if length%2 != 0 {
+ r.err = fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ return nil
+ }
+ for i := 0; i < length; i += 2 {
+ if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+ return r
+ }
+ }
+
+ return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+ for k, v := range schemes {
+ schemes[k] = strings.ToLower(v)
+ }
+ return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+ r.buildVarsFunc = f
+ return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+// r := mux.NewRouter()
+// s := r.Host("www.example.com").Subrouter()
+// s.HandleFunc("/products/", ProductsHandler)
+// s.HandleFunc("/products/{key}", ProductHandler)
+// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+ router := &Router{parent: r, strictSlash: r.strictSlash}
+ r.addMatcher(router)
+ return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// ...a URL for it can be built using:
+//
+// url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return a url.URL with the following path:
+//
+// "/articles/technology/42"
+//
+// This also works for host variables:
+//
+// r := mux.NewRouter()
+// r.Host("{subdomain}.domain.com").
+// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// // url.String() will be "http://news.domain.com/articles/technology/42"
+// url, err := r.Get("article").URL("subdomain", "news",
+// "category", "technology",
+// "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil {
+ return nil, errors.New("mux: route doesn't have a host or path")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ var scheme, host, path string
+ if r.regexp.host != nil {
+ // Set a default scheme.
+ scheme = "http"
+ if host, err = r.regexp.host.url(values); err != nil {
+ return nil, err
+ }
+ }
+ if r.regexp.path != nil {
+ if path, err = r.regexp.path.url(values); err != nil {
+ return nil, err
+ }
+ }
+ return &url.URL{
+ Scheme: scheme,
+ Host: host,
+ Path: path,
+ }, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil || r.regexp.host == nil {
+ return nil, errors.New("mux: route doesn't have a host")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ host, err := r.regexp.host.url(values)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host,
+ }, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil || r.regexp.path == nil {
+ return nil, errors.New("mux: route doesn't have a path")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ path, err := r.regexp.path.url(values)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Path: path,
+ }, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp == nil || r.regexp.path == nil {
+ return "", errors.New("mux: route doesn't have a path")
+ }
+ return r.regexp.path.template, nil
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp == nil || r.regexp.host == nil {
+ return "", errors.New("mux: route doesn't have a host")
+ }
+ return r.regexp.host.template, nil
+}
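
A small sketch of reading both templates back from a route (host and path are chosen arbitrarily):

```go
package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	route := mux.NewRouter().
		Host("{subdomain}.example.com").
		Path("/articles/{id:[0-9]+}")

	host, _ := route.GetHostTemplate() // "{subdomain}.example.com"
	path, _ := route.GetPathTemplate() // "/articles/{id:[0-9]+}"
	fmt.Println(host, path)
}
```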
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+ m, err := mapFromPairsToString(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+ if r.parent != nil {
+ m = r.parent.buildVars(m)
+ }
+ if r.buildVarsFunc != nil {
+ m = r.buildVarsFunc(m)
+ }
+ return m
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+ getNamedRoutes() map[string]*Route
+ getRegexpGroup() *routeRegexpGroup
+ buildVars(map[string]string) map[string]string
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+ if r.parent == nil {
+ // During tests router is not always set.
+ r.parent = NewRouter()
+ }
+ return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+ if r.regexp == nil {
+ if r.parent == nil {
+ // During tests router is not always set.
+ r.parent = NewRouter()
+ }
+ regexp := r.parent.getRegexpGroup()
+ if regexp == nil {
+ r.regexp = new(routeRegexpGroup)
+ } else {
+ // Copy.
+ r.regexp = &routeRegexpGroup{
+ host: regexp.host,
+ path: regexp.path,
+ queries: regexp.queries,
+ }
+ }
+ }
+ return r.regexp
+}
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 000000000..82b4de97c
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 000000000..1c95f5978
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrapping that happens, when you might just care about one.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+ _, err := os.Open("/i/dont/exist")
+ if err != nil {
+ return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+ }
+
+ return nil
+}
+
+func main() {
+ err := tryOpen()
+
+ // We can use the Contains helpers to check if an error contains
+ // another error. It is safe to do this with a nil error, or with
+ // an error that doesn't even use the errwrap package.
+ if errwrap.Contains(err, "doesn't exist") {
+ // Do something
+ }
+ if errwrap.ContainsType(err, new(os.PathError)) {
+ // Do something
+ }
+
+ // Or we can use the associated `Get` functions to just extract
+ // a specific error. This would return nil if that specific error doesn't
+ // exist.
+ if perr := errwrap.GetType(err, new(os.PathError)); perr != nil {
+ // Do something with the extracted *os.PathError.
+ }
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+ Code ErrorCode
+ Err error
+}
+
+func (e *AppError) WrappedErrors() []error {
+ return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+ // This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 000000000..a733bef18
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+ WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+ return &wrappedError{
+ Outer: outer,
+ Inner: inner,
+ }
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+func Wrapf(format string, err error) error {
+ outerMsg := "<nil>"
+ if err != nil {
+ outerMsg = err.Error()
+ }
+
+ outer := errors.New(strings.Replace(
+ format, "{{err}}", outerMsg, -1))
+
+ return Wrap(outer, err)
+}
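
A minimal sketch of Wrapf in use (the file path is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/errwrap"
)

func main() {
	_, err := os.Open("/no/such/file")
	if err != nil {
		// "{{err}}" is replaced by err.Error(); the original error
		// stays reachable through the errwrap helpers.
		wrapped := errwrap.Wrapf("could not open config: {{err}}", err)
		fmt.Println(wrapped)
		fmt.Println(errwrap.ContainsType(wrapped, new(os.PathError))) // true
	}
}
```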
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+ return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+ return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+ es := GetAll(err, msg)
+ if len(es) > 0 {
+ return es[len(es)-1]
+ }
+
+ return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error {
+ es := GetAllType(err, v)
+ if len(es) > 0 {
+ return es[len(es)-1]
+ }
+
+ return nil
+}
+
+// GetAll gets all the errors that might be wrapped in err with the
+// given message. The order of the errors is such that the outermost
+// matching error (the most recent wrap) is index zero, and so on.
+func GetAll(err error, msg string) []error {
+ var result []error
+
+ Walk(err, func(err error) {
+ if err.Error() == msg {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// GetAllType gets all the errors that are the same type as v.
+//
+// The order of the return value is the same as described in GetAll.
+func GetAllType(err error, v interface{}) []error {
+ var result []error
+
+ var search string
+ if v != nil {
+ search = reflect.TypeOf(v).String()
+ }
+ Walk(err, func(err error) {
+ var needle string
+ if err != nil {
+ needle = reflect.TypeOf(err).String()
+ }
+
+ if needle == search {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// Walk walks all the wrapped errors in err and calls the callback. If
+// err isn't a wrapped error, this will be called once for err. If err
+// is a wrapped error, the callback will be called for both the wrapper
+// that implements error as well as the wrapped error itself.
+func Walk(err error, cb WalkFunc) {
+ if err == nil {
+ return
+ }
+
+ switch e := err.(type) {
+ case *wrappedError:
+ cb(e.Outer)
+ Walk(e.Inner, cb)
+ case Wrapper:
+ cb(err)
+
+ for _, err := range e.WrappedErrors() {
+ Walk(err, cb)
+ }
+ default:
+ cb(err)
+ }
+}
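
A sketch of the traversal order (messages chosen arbitrarily): the wrapper's outer error is visited first, then the wrapped error.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	inner := errors.New("connection refused")
	err := errwrap.Wrapf("dial failed: {{err}}", inner)

	// Prints "dial failed: connection refused", then "connection refused".
	errwrap.Walk(err, func(e error) {
		fmt.Println(e)
	})
}
```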
+
+// wrappedError is an implementation of error that has both the
+// outer and inner errors.
+type wrappedError struct {
+ Outer error
+ Inner error
+}
+
+func (w *wrappedError) Error() string {
+ return w.Outer.Error()
+}
+
+func (w *wrappedError) WrappedErrors() []error {
+ return []error{w.Outer, w.Inner}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644
index 000000000..82b4de97c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
new file mode 100644
index 000000000..ead5830f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -0,0 +1,97 @@
+# go-multierror
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: https://travis-ci.org/hashicorp/go-multierror
+[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
+
+`go-multierror` is a package for Go that provides a mechanism for
+representing a list of `error` values as a single `error`.
+
+This allows a function in Go to return an `error` that might actually
+be a list of errors. If the caller knows this, they can unwrap the
+list and access the errors. If the caller doesn't know, the error
+still formats as a single, human-readable message.
+
+`go-multierror` implements the
+[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
+be used with that library, as well.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-multierror`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-multierror
+
+## Usage
+
+go-multierror is easy to use and purposely built to be unobtrusive in
+existing Go applications/libraries that may not be aware of it.
+
+**Building a list of errors**
+
+The `Append` function is used to create a list of errors. This function
+behaves a lot like the Go built-in `append` function: whether the first
+argument is nil, a `multierror.Error`, or any other `error`, the
+function behaves as you would expect.
+
+```go
+var result error
+
+if err := step1(); err != nil {
+ result = multierror.Append(result, err)
+}
+if err := step2(); err != nil {
+ result = multierror.Append(result, err)
+}
+
+return result
+```
+
+**Customizing the formatting of the errors**
+
+By specifying a custom `ErrorFormat`, you can customize the format
+of the `Error() string` function:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here, maybe using Append
+
+if result != nil {
+ result.ErrorFormat = func([]error) string {
+ return "errors!"
+ }
+}
+```
+
+**Accessing the list of errors**
+
+`multierror.Error` implements `error` so if the caller doesn't know about
+multierror, it will work just fine. But if you're aware a multierror might
+be returned, you can use a type assertion to access the list of errors:
+
+```go
+if err := something(); err != nil {
+ if merr, ok := err.(*multierror.Error); ok {
+ // Use merr.Errors
+ }
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` function
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
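+
+**Using with errwrap**
+
+Because `multierror.Error` implements the `errwrap.Wrapper` interface,
+the `errwrap` helpers can search a multierror for a specific wrapped
+error. A minimal sketch (the error message is illustrative):
+
+```go
+err := multierror.Append(nil, errors.New("disk full"))
+
+if errwrap.Contains(err, "disk full") {
+ // handle that specific error
+}
+```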
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 000000000..775b6e753
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,41 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+func Append(err error, errs ...error) *Error {
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Go through each error and flatten
+ for _, e := range errs {
+ switch e := e.(type) {
+ case *Error:
+ if e != nil {
+ err.Errors = append(err.Errors, e.Errors...)
+ }
+ default:
+ if e != nil {
+ err.Errors = append(err.Errors, e)
+ }
+ }
+ }
+
+ return err
+ default:
+ newErrs := make([]error, 0, len(errs)+1)
+ if err != nil {
+ newErrs = append(newErrs, err)
+ }
+ newErrs = append(newErrs, errs...)
+
+ return Append(&Error{}, newErrs...)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 000000000..aab8e9abe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
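+//
+// A minimal sketch (err1 and err2 illustrative): given
+//
+//	nested := &Error{Errors: []error{err1, &Error{Errors: []error{err2}}}}
+//
+// Flatten(nested) returns a single *Error whose Errors slice is
+// [err1, err2].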
+func Flatten(err error) error {
+ // If it isn't an *Error, just return the error as-is
+ if _, ok := err.(*Error); !ok {
+ return err
+ }
+
+ // Otherwise, make the result and flatten away!
+ flatErr := new(Error)
+ flatten(err, flatErr)
+ return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+ switch err := err.(type) {
+ case *Error:
+ for _, e := range err.Errors {
+ flatten(e, flatErr)
+ }
+ default:
+ flatErr.Errors = append(flatErr.Errors, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 000000000..6c7a3cc91
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
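+//
+// With two errors, the output looks roughly like (illustrative):
+//
+//	2 errors occurred:
+//
+//	* first error
+//	* second error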
+func ListFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+ }
+
+ points := make([]string, len(es))
+ for i, err := range es {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ return fmt.Sprintf(
+ "%d errors occurred:\n\n%s",
+ len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 000000000..89b1422d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+ "fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate multiple errors and return them as a single "error" value.
+type Error struct {
+ Errors []error
+ ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+ fn := e.ErrorFormat
+ if fn == nil {
+ fn = ListFormatFunc
+ }
+
+ return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+ if e == nil {
+ return nil
+ }
+ if len(e.Errors) == 0 {
+ return nil
+ }
+
+ return e
+}
+
+func (e *Error) GoString() string {
+ return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// than accessing the Errors field directly. It is implemented only to
+// satisfy the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+ return e.Errors
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 000000000..5c477abe4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful when appending multiple multierrors
+// together in order to give better scoping.
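+//
+// A minimal sketch (error values illustrative):
+//
+//	merr := Append(nil, errors.New("bad port"))
+//	err := Prefix(merr, "config:")
+//	// err.Error() now includes "config: bad port"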
+func Prefix(err error, prefix string) error {
+ if err == nil {
+ return nil
+ }
+
+ format := fmt.Sprintf("%s {{err}}", prefix)
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Wrap each of the errors
+ for i, e := range err.Errors {
+ err.Errors[i] = errwrap.Wrapf(format, e)
+ }
+
+ return err
+ default:
+ return errwrap.Wrapf(format, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go
new file mode 100644
index 000000000..337d96329
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/2q.go
@@ -0,0 +1,212 @@
+package lru
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // Default2QRecentRatio is the ratio of the 2Q cache dedicated
+ // to recently added entries that have only been accessed once.
+ Default2QRecentRatio = 0.25
+
+ // Default2QGhostEntries is the default ratio of ghost
+ // entries kept to track entries recently evicted
+ Default2QGhostEntries = 0.50
+)
+
+// TwoQueueCache is a thread-safe fixed size 2Q cache.
+// 2Q is an enhancement over the standard LRU cache
+// in that it tracks both frequently and recently used
+// entries separately. This avoids a burst in access to new
+// entries from evicting frequently used entries. It adds some
+// additional tracking overhead to the standard LRU cache, is
+// computationally about 2x the cost, and adds some extra metadata
+// overhead. The ARCCache is similar, but does not require setting any
+// parameters.
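+//
+// A minimal usage sketch (size illustrative):
+//
+//	cache, _ := New2Q(128)
+//	cache.Add("key", "value")
+//	v, ok := cache.Get("key")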
+type TwoQueueCache struct {
+ size int
+ recentSize int
+
+ recent *simplelru.LRU
+ frequent *simplelru.LRU
+ recentEvict *simplelru.LRU
+ lock sync.RWMutex
+}
+
+// New2Q creates a new TwoQueueCache using the default
+// values for the parameters.
+func New2Q(size int) (*TwoQueueCache, error) {
+ return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
+}
+
+// New2QParams creates a new TwoQueueCache using the provided
+// parameter values.
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
+ if size <= 0 {
+ return nil, fmt.Errorf("invalid size")
+ }
+ if recentRatio < 0.0 || recentRatio > 1.0 {
+ return nil, fmt.Errorf("invalid recent ratio")
+ }
+ if ghostRatio < 0.0 || ghostRatio > 1.0 {
+ return nil, fmt.Errorf("invalid ghost ratio")
+ }
+
+ // Determine the sub-sizes
+ recentSize := int(float64(size) * recentRatio)
+ evictSize := int(float64(size) * ghostRatio)
+
+ // Allocate the LRUs
+ recent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ frequent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ recentEvict, err := simplelru.NewLRU(evictSize, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the cache
+ c := &TwoQueueCache{
+ size: size,
+ recentSize: recentSize,
+ recent: recent,
+ frequent: frequent,
+ recentEvict: recentEvict,
+ }
+ return c, nil
+}
+
+func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if this is a frequent value
+ if val, ok := c.frequent.Get(key); ok {
+ return val, ok
+ }
+
+ // If the value is contained in recent, then we
+ // promote it to frequent
+ if val, ok := c.recent.Peek(key); ok {
+ c.recent.Remove(key)
+ c.frequent.Add(key, val)
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+func (c *TwoQueueCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is frequently used already,
+ // and just update the value
+ if c.frequent.Contains(key) {
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Check if the value is recently used, and promote
+ // the value into the frequent list
+ if c.recent.Contains(key) {
+ c.recent.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // If the value was recently evicted, add it to the
+ // frequently used list
+ if c.recentEvict.Contains(key) {
+ c.ensureSpace(true)
+ c.recentEvict.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Add to the recently seen list
+ c.ensureSpace(false)
+ c.recent.Add(key, value)
+}
+
+// ensureSpace is used to ensure we have space in the cache
+func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
+ // If we have space, nothing to do
+ recentLen := c.recent.Len()
+ freqLen := c.frequent.Len()
+ if recentLen+freqLen < c.size {
+ return
+ }
+
+ // If the recent buffer is larger than
+ // the target, evict from there
+ if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
+ k, _, _ := c.recent.RemoveOldest()
+ c.recentEvict.Add(k, nil)
+ return
+ }
+
+ // Remove from the frequent list otherwise
+ c.frequent.RemoveOldest()
+}
+
+func (c *TwoQueueCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.recent.Len() + c.frequent.Len()
+}
+
+func (c *TwoQueueCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.frequent.Keys()
+ k2 := c.recent.Keys()
+ return append(k1, k2...)
+}
+
+func (c *TwoQueueCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.frequent.Remove(key) {
+ return
+ }
+ if c.recent.Remove(key) {
+ return
+ }
+ if c.recentEvict.Remove(key) {
+ return
+ }
+}
+
+func (c *TwoQueueCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.recent.Purge()
+ c.frequent.Purge()
+ c.recentEvict.Purge()
+}
+
+func (c *TwoQueueCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.frequent.Contains(key) || c.recent.Contains(key)
+}
+
+func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.frequent.Peek(key); ok {
+ return val, ok
+ }
+ return c.recent.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 000000000..be2cc4dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md
new file mode 100644
index 000000000..33e58cfaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/README.md
@@ -0,0 +1,25 @@
+golang-lru
+==========
+
+This provides the `lru` package which implements a fixed-size
+thread-safe LRU cache. It is based on the cache in Groupcache.
+
+Documentation
+=============
+
+Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
+
+Example
+=======
+
+Using the LRU is very simple:
+
+```go
+l, _ := New(128)
+for i := 0; i < 256; i++ {
+ l.Add(i, nil)
+}
+if l.Len() != 128 {
+ panic(fmt.Sprintf("bad len: %v", l.Len()))
+}
+```
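+
+An eviction callback can be supplied via `NewWithEvict`. A minimal
+sketch (the callback body is illustrative):
+
+```go
+l, _ := NewWithEvict(128, func(key interface{}, value interface{}) {
+ fmt.Printf("evicted: %v\n", key)
+})
+for i := 0; i < 256; i++ {
+ l.Add(i, nil)
+}
+```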
diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go
new file mode 100644
index 000000000..a2a252817
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/arc.go
@@ -0,0 +1,257 @@
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
+// ARC is an enhancement over the standard LRU cache in that it tracks both
+// frequency and recency of use. This avoids a burst in access to new
+// entries from evicting the frequently used older entries. It adds some
+// additional tracking overhead to a standard LRU cache, computationally
+// it is roughly 2x the cost, and the extra memory overhead is linear
+// with the size of the cache. ARC has been patented by IBM, but is
+// similar to the TwoQueueCache (2Q) which requires setting parameters.
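+//
+// A minimal usage sketch (size illustrative):
+//
+//	cache, _ := NewARC(128)
+//	cache.Add("key", "value")
+//	if v, ok := cache.Get("key"); ok {
+//		// use v
+//	}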
+type ARCCache struct {
+ size int // Size is the total capacity of the cache
+ p int // P is the dynamic preference towards T1 or T2
+
+ t1 *simplelru.LRU // T1 is the LRU for recently accessed items
+ b1 *simplelru.LRU // B1 is the LRU for evictions from t1
+
+ t2 *simplelru.LRU // T2 is the LRU for frequently accessed items
+ b2 *simplelru.LRU // B2 is the LRU for evictions from t2
+
+ lock sync.RWMutex
+}
+
+// NewARC creates an ARC of the given size
+func NewARC(size int) (*ARCCache, error) {
+ // Create the sub LRUs
+ b1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the ARC
+ c := &ARCCache{
+ size: size,
+ p: 0,
+ t1: t1,
+ b1: b1,
+ t2: t2,
+ b2: b2,
+ }
+ return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *ARCCache) Get(key interface{}) (interface{}, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // If the value is contained in T1 (recent), then
+ // promote it to T2 (frequent)
+ if val, ok := c.t1.Peek(key); ok {
+ c.t1.Remove(key)
+ c.t2.Add(key, val)
+ return val, ok
+ }
+
+ // Check if the value is contained in T2 (frequent)
+ if val, ok := c.t2.Get(key); ok {
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+// Add adds a value to the cache.
+func (c *ARCCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is contained in T1 (recent), and potentially
+ // promote it to frequent T2
+ if c.t1.Contains(key) {
+ c.t1.Remove(key)
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if the value is already in T2 (frequent) and update it
+ if c.t2.Contains(key) {
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // recently used list
+ if c.b1.Contains(key) {
+ // T1 set is too small, increase P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b2Len > b1Len {
+ delta = b2Len / b1Len
+ }
+ if c.p+delta >= c.size {
+ c.p = c.size
+ } else {
+ c.p += delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Remove from B1
+ c.b1.Remove(key)
+
+ // Add the key to the frequently used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // frequently used list
+ if c.b2.Contains(key) {
+ // T2 set is too small, decrease P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b1Len > b2Len {
+ delta = b1Len / b2Len
+ }
+ if delta >= c.p {
+ c.p = 0
+ } else {
+ c.p -= delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(true)
+ }
+
+ // Remove from B2
+ c.b2.Remove(key)
+
+ // Add the key to the frequently used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Keep the size of the ghost buffers trim
+ if c.b1.Len() > c.size-c.p {
+ c.b1.RemoveOldest()
+ }
+ if c.b2.Len() > c.p {
+ c.b2.RemoveOldest()
+ }
+
+ // Add to the recently seen list
+ c.t1.Add(key, value)
+}
+
+// replace is used to adaptively evict from either T1 or T2
+// based on the current learned value of P
+func (c *ARCCache) replace(b2ContainsKey bool) {
+ t1Len := c.t1.Len()
+ if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
+ k, _, ok := c.t1.RemoveOldest()
+ if ok {
+ c.b1.Add(k, nil)
+ }
+ } else {
+ k, _, ok := c.t2.RemoveOldest()
+ if ok {
+ c.b2.Add(k, nil)
+ }
+ }
+}
+
+// Len returns the number of cached entries
+func (c *ARCCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Len() + c.t2.Len()
+}
+
+// Keys returns all the cached keys
+func (c *ARCCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.t1.Keys()
+ k2 := c.t2.Keys()
+ return append(k1, k2...)
+}
+
+// Remove is used to purge a key from the cache
+func (c *ARCCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.t1.Remove(key) {
+ return
+ }
+ if c.t2.Remove(key) {
+ return
+ }
+ if c.b1.Remove(key) {
+ return
+ }
+ if c.b2.Remove(key) {
+ return
+ }
+}
+
+// Purge is used to clear the cache
+func (c *ARCCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.t1.Purge()
+ c.t2.Purge()
+ c.b1.Purge()
+ c.b2.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *ARCCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Contains(key) || c.t2.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *ARCCache) Peek(key interface{}) (interface{}, bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.t1.Peek(key); ok {
+ return val, ok
+ }
+ return c.t2.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go
new file mode 100644
index 000000000..a6285f989
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/lru.go
@@ -0,0 +1,114 @@
+// Package lru provides a simple LRU cache. It is based on the
+// LRU implementation in groupcache:
+// https://github.com/golang/groupcache/tree/master/lru
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache struct {
+ lru *simplelru.LRU
+ lock sync.RWMutex
+}
+
+// New creates an LRU of the given size
+func New(size int) (*Cache, error) {
+ return NewWithEvict(size, nil)
+}
+
+// NewWithEvict constructs a fixed size cache with the given eviction
+// callback.
+func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
+ lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
+ if err != nil {
+ return nil, err
+ }
+ c := &Cache{
+ lru: lru,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache
+func (c *Cache) Purge() {
+ c.lock.Lock()
+ c.lru.Purge()
+ c.lock.Unlock()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *Cache) Add(key, value interface{}) bool {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Add(key, value)
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key interface{}) (interface{}, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Get(key)
+}
+
+// Contains checks if a key is in the cache, without updating the
+// recent-ness or deleting it for being stale.
+func (c *Cache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Contains(key)
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+func (c *Cache) Peek(key interface{}) (interface{}, bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Peek(key)
+}
+
+// ContainsOrAdd checks if a key is in the cache without updating the
+// recent-ness or deleting it for being stale, and if not, adds the value.
+// Returns whether found and whether an eviction occurred.
+func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.lru.Contains(key) {
+ return true, false
+ }
+
+ evict = c.lru.Add(key, value)
+ return false, evict
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key interface{}) {
+ c.lock.Lock()
+ c.lru.Remove(key)
+ c.lock.Unlock()
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ c.lock.Lock()
+ c.lru.RemoveOldest()
+ c.lock.Unlock()
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *Cache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Keys()
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Len()
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 000000000..cb416b394
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,160 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ onEvict EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ return nil, errors.New("must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) bool {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+ // Add new item
+ ent := &entry{key, value}
+ entry := c.evictList.PushFront(ent)
+ c.items[key] = entry
+
+ evict := c.evictList.Len() > c.size
+ // Verify size not exceeded
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the
+// recent-ness or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) bool {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (interface{}, interface{}, bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt
new file mode 100644
index 000000000..818d802a5
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/LICENSE.txt
@@ -0,0 +1,21 @@
+# The MIT License (MIT)
+
+# © Copyright 2015 Hewlett Packard Enterprise Development LP
+Copyright (c) 2014 ActiveState
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md
new file mode 100644
index 000000000..fb7fbc26c
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/README.md
@@ -0,0 +1,28 @@
+[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail)
+[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail)
+
+# Go package for tail-ing files
+
+A Go package striving to emulate the features of the BSD `tail` program.
+
+```Go
+t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
+for line := range t.Lines {
+ fmt.Println(line.Text)
+}
+```
+
+See [API documentation](http://godoc.org/github.com/hpcloud/tail).
+
+## Log rotation
+
+Tail comes with full support for truncation/move detection as it is
+designed to work with log rotation tools.
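+
+For example, a minimal sketch of a `tail -F`-style follow that survives
+rotation, built from the `Follow` and `ReOpen` options of this package's
+`Config`:
+
+```Go
+t, err := tail.TailFile("/var/log/nginx.log", tail.Config{
+	Follow: true, // keep reading as the file grows
+	ReOpen: true, // reopen the file after it is moved or recreated
+})
+if err != nil {
+	log.Fatal(err)
+}
+for line := range t.Lines {
+	fmt.Println(line.Text)
+}
+```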
+
+## Installing
+
+ go get github.com/hpcloud/tail/...
+
+## Windows support
+
+This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
new file mode 100644
index 000000000..358b69e7f
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
@@ -0,0 +1,97 @@
+// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
+package ratelimiter
+
+import (
+ "time"
+)
+
+type LeakyBucket struct {
+ Size uint16
+ Fill float64
+ LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+ Lastupdate time.Time
+ Now func() time.Time
+}
+
+func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
+ bucket := LeakyBucket{
+ Size: size,
+ Fill: 0,
+ LeakInterval: leakInterval,
+ Now: time.Now,
+ Lastupdate: time.Now(),
+ }
+
+ return &bucket
+}
+
+func (b *LeakyBucket) updateFill() {
+ now := b.Now()
+ if b.Fill > 0 {
+ elapsed := now.Sub(b.Lastupdate)
+
+ b.Fill -= float64(elapsed) / float64(b.LeakInterval)
+ if b.Fill < 0 {
+ b.Fill = 0
+ }
+ }
+ b.Lastupdate = now
+}
+
+func (b *LeakyBucket) Pour(amount uint16) bool {
+ b.updateFill()
+
+	newfill := b.Fill + float64(amount)
+
+ if newfill > float64(b.Size) {
+ return false
+ }
+
+ b.Fill = newfill
+
+ return true
+}
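+
+// A minimal usage sketch: a bucket of size 10 that leaks one unit per
+// second admits work only while there is room left.
+//
+//	b := NewLeakyBucket(10, time.Second)
+//	if b.Pour(1) {
+//		// within the rate limit: do the work
+//	}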
+
+// DrainedAt returns the time at which this bucket will be completely drained.
+func (b *LeakyBucket) DrainedAt() time.Time {
+ return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
+}
+
+// TimeToDrain returns the duration until this bucket is completely drained.
+func (b *LeakyBucket) TimeToDrain() time.Duration {
+ return b.DrainedAt().Sub(b.Now())
+}
+
+func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
+ return b.Now().Sub(b.Lastupdate)
+}
+
+type LeakyBucketSer struct {
+ Size uint16
+ Fill float64
+ LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+ Lastupdate time.Time
+}
+
+func (b *LeakyBucket) Serialise() *LeakyBucketSer {
+ bucket := LeakyBucketSer{
+ Size: b.Size,
+ Fill: b.Fill,
+ LeakInterval: b.LeakInterval,
+ Lastupdate: b.Lastupdate,
+ }
+
+ return &bucket
+}
+
+func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
+ bucket := LeakyBucket{
+ Size: b.Size,
+ Fill: b.Fill,
+ LeakInterval: b.LeakInterval,
+ Lastupdate: b.Lastupdate,
+ Now: time.Now,
+ }
+
+ return &bucket
+}
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go
new file mode 100644
index 000000000..8f6a5784a
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go
@@ -0,0 +1,58 @@
+package ratelimiter
+
+import (
+ "errors"
+ "time"
+)
+
+const GC_SIZE int = 100
+
+type Memory struct {
+ store map[string]LeakyBucket
+ lastGCCollected time.Time
+}
+
+func NewMemory() *Memory {
+ m := new(Memory)
+ m.store = make(map[string]LeakyBucket)
+ m.lastGCCollected = time.Now()
+ return m
+}
+
+func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
+
+ bucket, ok := m.store[key]
+ if !ok {
+ return nil, errors.New("miss")
+ }
+
+ return &bucket, nil
+}
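+
+// A get-or-create sketch for a hypothetical key (the bucket size and leak
+// interval are illustrative). Note that the returned bucket is a copy of
+// the stored value, so changes must be written back with SetBucketFor:
+//
+//	m := NewMemory()
+//	b, err := m.GetBucketFor("client-42")
+//	if err != nil { // cache miss
+//		b = NewLeakyBucket(10, time.Second)
+//	}
+//	b.Pour(1)
+//	m.SetBucketFor("client-42", *b)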
+
+func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
+
+ if len(m.store) > GC_SIZE {
+ m.GarbageCollect()
+ }
+
+ m.store[key] = bucket
+
+ return nil
+}
+
+func (m *Memory) GarbageCollect() {
+ now := time.Now()
+
+ // rate limit GC to once per minute
+	if now.Sub(m.lastGCCollected) > 60*time.Second {
+
+ for key, bucket := range m.store {
+ // if the bucket is drained, then GC
+			if bucket.DrainedAt().Unix() <= now.Unix() {
+ delete(m.store, key)
+ }
+ }
+
+ m.lastGCCollected = now
+ }
+}
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go
new file mode 100644
index 000000000..89b2fe882
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go
@@ -0,0 +1,6 @@
+package ratelimiter
+
+type Storage interface {
+ GetBucketFor(string) (*LeakyBucket, error)
+ SetBucketFor(string, LeakyBucket) error
+}
diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/hpcloud/tail/tail.go
new file mode 100644
index 000000000..2d252d605
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/tail.go
@@ -0,0 +1,438 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package tail
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hpcloud/tail/ratelimiter"
+ "github.com/hpcloud/tail/util"
+ "github.com/hpcloud/tail/watch"
+ "gopkg.in/tomb.v1"
+)
+
+var (
+ ErrStop = fmt.Errorf("tail should now stop")
+)
+
+type Line struct {
+ Text string
+ Time time.Time
+ Err error // Error from tail
+}
+
+// NewLine returns a Line with present time.
+func NewLine(text string) *Line {
+ return &Line{text, time.Now(), nil}
+}
+
+// SeekInfo represents arguments to `os.Seek`
+type SeekInfo struct {
+ Offset int64
+ Whence int // os.SEEK_*
+}
+
+type logger interface {
+ Fatal(v ...interface{})
+ Fatalf(format string, v ...interface{})
+ Fatalln(v ...interface{})
+ Panic(v ...interface{})
+ Panicf(format string, v ...interface{})
+ Panicln(v ...interface{})
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+// Config is used to specify how a file must be tailed.
+type Config struct {
+	// File-specific
+ Location *SeekInfo // Seek to this location before tailing
+ ReOpen bool // Reopen recreated files (tail -F)
+ MustExist bool // Fail early if the file does not exist
+ Poll bool // Poll for file changes instead of using inotify
+ Pipe bool // Is a named pipe (mkfifo)
+ RateLimiter *ratelimiter.LeakyBucket
+
+ // Generic IO
+ Follow bool // Continue looking for new lines (tail -f)
+ MaxLineSize int // If non-zero, split longer lines into multiple lines
+
+ // Logger, when nil, is set to tail.DefaultLogger
+ // To disable logging: set field to tail.DiscardingLogger
+ Logger logger
+}
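+
+// For example, a sketch of a Config that starts at the current end of the
+// file and keeps following it, as a caller might write:
+//
+//	t, _ := tail.TailFile(name, tail.Config{
+//		Follow:   true,
+//		Location: &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END},
+//	})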
+
+type Tail struct {
+ Filename string
+ Lines chan *Line
+ Config
+
+ file *os.File
+ reader *bufio.Reader
+
+ watcher watch.FileWatcher
+ changes *watch.FileChanges
+
+ tomb.Tomb // provides: Done, Kill, Dying
+
+ lk sync.Mutex
+}
+
+var (
+ // DefaultLogger is used when Config.Logger == nil
+ DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
+ // DiscardingLogger can be used to disable logging output
+ DiscardingLogger = log.New(ioutil.Discard, "", 0)
+)
+
+// TailFile begins tailing the file. Output stream is made available
+// via the `Tail.Lines` channel. To handle errors during tailing,
+// invoke the `Wait` or `Err` method after finishing reading from the
+// `Lines` channel.
+func TailFile(filename string, config Config) (*Tail, error) {
+ if config.ReOpen && !config.Follow {
+ util.Fatal("cannot set ReOpen without Follow.")
+ }
+
+ t := &Tail{
+ Filename: filename,
+ Lines: make(chan *Line),
+ Config: config,
+ }
+
+	// When Logger is not specified in the config, fall back to DefaultLogger.
+	if t.Logger == nil {
+		t.Logger = DefaultLogger
+	}
+
+ if t.Poll {
+ t.watcher = watch.NewPollingFileWatcher(filename)
+ } else {
+ t.watcher = watch.NewInotifyFileWatcher(filename)
+ }
+
+ if t.MustExist {
+ var err error
+ t.file, err = OpenFile(t.Filename)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ go t.tailFileSync()
+
+ return t, nil
+}
+
+// Tell returns the file's current position, like stdio's ftell().
+// Note that this value may not be exact: a line may already have been
+// read into the Lines channel but not yet consumed, so the reported
+// offset can lag the consumer by up to one line.
+func (tail *Tail) Tell() (offset int64, err error) {
+ if tail.file == nil {
+ return
+ }
+ offset, err = tail.file.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return
+ }
+
+ tail.lk.Lock()
+ defer tail.lk.Unlock()
+ if tail.reader == nil {
+ return
+ }
+
+ offset -= int64(tail.reader.Buffered())
+ return
+}
+
+// Stop stops the tailing activity.
+func (tail *Tail) Stop() error {
+ tail.Kill(nil)
+ return tail.Wait()
+}
+
+// StopAtEOF stops tailing as soon as the end of the file is reached.
+func (tail *Tail) StopAtEOF() error {
+ tail.Kill(errStopAtEOF)
+ return tail.Wait()
+}
+
+var errStopAtEOF = errors.New("tail: stop at eof")
+
+func (tail *Tail) close() {
+ close(tail.Lines)
+ tail.closeFile()
+}
+
+func (tail *Tail) closeFile() {
+ if tail.file != nil {
+ tail.file.Close()
+ tail.file = nil
+ }
+}
+
+func (tail *Tail) reopen() error {
+ tail.closeFile()
+ for {
+ var err error
+ tail.file, err = OpenFile(tail.Filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
+ if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
+ if err == tomb.ErrDying {
+ return err
+ }
+ return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
+ }
+ continue
+ }
+ return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
+ }
+ break
+ }
+ return nil
+}
+
+func (tail *Tail) readLine() (string, error) {
+ tail.lk.Lock()
+ line, err := tail.reader.ReadString('\n')
+ tail.lk.Unlock()
+ if err != nil {
+ // Note ReadString "returns the data read before the error" in
+ // case of an error, including EOF, so we return it as is. The
+ // caller is expected to process it if err is EOF.
+ return line, err
+ }
+
+ line = strings.TrimRight(line, "\n")
+
+ return line, err
+}
+
+func (tail *Tail) tailFileSync() {
+ defer tail.Done()
+ defer tail.close()
+
+ if !tail.MustExist {
+ // deferred first open.
+ err := tail.reopen()
+ if err != nil {
+ if err != tomb.ErrDying {
+ tail.Kill(err)
+ }
+ return
+ }
+ }
+
+ // Seek to requested location on first open of the file.
+ if tail.Location != nil {
+ _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
+ tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
+ if err != nil {
+ tail.Killf("Seek error on %s: %s", tail.Filename, err)
+ return
+ }
+ }
+
+ tail.openReader()
+
+	var offset int64
+ var err error
+
+ // Read line by line.
+ for {
+ // do not seek in named pipes
+ if !tail.Pipe {
+ // grab the position in case we need to back up in the event of a half-line
+ offset, err = tail.Tell()
+ if err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+
+ line, err := tail.readLine()
+
+ // Process `line` even if err is EOF.
+ if err == nil {
+ cooloff := !tail.sendLine(line)
+ if cooloff {
+ // Wait a second before seeking till the end of
+ // file when rate limit is reached.
+			msg := "Too much log activity; waiting a second before resuming tailing"
+			tail.Lines <- &Line{msg, time.Now(), errors.New(msg)}
+ select {
+ case <-time.After(time.Second):
+ case <-tail.Dying():
+ return
+ }
+ if err := tail.seekEnd(); err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+ } else if err == io.EOF {
+ if !tail.Follow {
+ if line != "" {
+ tail.sendLine(line)
+ }
+ return
+ }
+
+ if tail.Follow && line != "" {
+ // this has the potential to never return the last line if
+ // it's not followed by a newline; seems a fair trade here
+ err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
+ if err != nil {
+ tail.Kill(err)
+ return
+ }
+ }
+
+ // When EOF is reached, wait for more data to become
+ // available. Wait strategy is based on the `tail.watcher`
+ // implementation (inotify or polling).
+ err := tail.waitForChanges()
+ if err != nil {
+ if err != ErrStop {
+ tail.Kill(err)
+ }
+ return
+ }
+ } else {
+ // non-EOF error
+ tail.Killf("Error reading %s: %s", tail.Filename, err)
+ return
+ }
+
+ select {
+ case <-tail.Dying():
+ if tail.Err() == errStopAtEOF {
+ continue
+ }
+ return
+ default:
+ }
+ }
+}
+
+// waitForChanges waits until the file has been appended to, deleted,
+// moved or truncated. When the file is moved or deleted, it will be
+// reopened if ReOpen is true. Truncated files are always reopened.
+func (tail *Tail) waitForChanges() error {
+ if tail.changes == nil {
+ pos, err := tail.file.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return err
+ }
+ tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
+ if err != nil {
+ return err
+ }
+ }
+
+ select {
+ case <-tail.changes.Modified:
+ return nil
+ case <-tail.changes.Deleted:
+ tail.changes = nil
+ if tail.ReOpen {
+ // XXX: we must not log from a library.
+ tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
+ if err := tail.reopen(); err != nil {
+ return err
+ }
+ tail.Logger.Printf("Successfully reopened %s", tail.Filename)
+ tail.openReader()
+ return nil
+ } else {
+ tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
+ return ErrStop
+ }
+ case <-tail.changes.Truncated:
+ // Always reopen truncated files (Follow is true)
+ tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
+ if err := tail.reopen(); err != nil {
+ return err
+ }
+ tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
+ tail.openReader()
+ return nil
+ case <-tail.Dying():
+ return ErrStop
+ }
+ panic("unreachable")
+}
+
+func (tail *Tail) openReader() {
+ if tail.MaxLineSize > 0 {
+ // add 2 to account for newline characters
+ tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
+ } else {
+ tail.reader = bufio.NewReader(tail.file)
+ }
+}
+
+func (tail *Tail) seekEnd() error {
+ return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
+}
+
+func (tail *Tail) seekTo(pos SeekInfo) error {
+ _, err := tail.file.Seek(pos.Offset, pos.Whence)
+ if err != nil {
+ return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
+ }
+ // Reset the read buffer whenever the file is re-seek'ed
+ tail.reader.Reset(tail.file)
+ return nil
+}
+
+// sendLine sends the line(s) to Lines channel, splitting longer lines
+// if necessary. Return false if rate limit is reached.
+func (tail *Tail) sendLine(line string) bool {
+ now := time.Now()
+ lines := []string{line}
+
+ // Split longer lines
+ if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
+ lines = util.PartitionString(line, tail.MaxLineSize)
+ }
+
+ for _, line := range lines {
+ tail.Lines <- &Line{line, now, nil}
+ }
+
+ if tail.Config.RateLimiter != nil {
+ ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
+ if !ok {
+ tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
+ tail.Filename)
+ return false
+ }
+ }
+
+ return true
+}
+
+// Cleanup removes inotify watches added by the tail package. This function is
+// meant to be invoked from a process's exit handler. The Linux kernel may not
+// automatically remove inotify watches after the process exits.
+func (tail *Tail) Cleanup() {
+ watch.Cleanup(tail.Filename)
+}
diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go
new file mode 100644
index 000000000..bc4dc3357
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/tail_posix.go
@@ -0,0 +1,11 @@
+// +build linux darwin freebsd netbsd openbsd
+
+package tail
+
+import (
+ "os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+ return os.Open(name)
+}
diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go
new file mode 100644
index 000000000..ef2cfca1b
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/tail_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package tail
+
+import (
+ "github.com/hpcloud/tail/winfile"
+ "os"
+)
+
+func OpenFile(name string) (file *os.File, err error) {
+ return winfile.OpenFile(name, os.O_RDONLY, 0)
+}
diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go
new file mode 100644
index 000000000..54151fe39
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/util/util.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package util
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "runtime/debug"
+)
+
+type Logger struct {
+ *log.Logger
+}
+
+var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
+
+// Fatal is like panic except it displays only the current goroutine's stack and then exits.
+func Fatal(format string, v ...interface{}) {
+ // https://github.com/hpcloud/log/blob/master/log.go#L45
+ LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
+ os.Exit(1)
+}
+
+// PartitionString partitions the string into chunks of given size,
+// with the last chunk of variable size.
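+// For example, PartitionString("hello", 2) yields ["he", "ll", "o"].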
+func PartitionString(s string, chunkSize int) []string {
+ if chunkSize <= 0 {
+ panic("invalid chunkSize")
+ }
+ length := len(s)
+ chunks := 1 + length/chunkSize
+ start := 0
+ end := chunkSize
+ parts := make([]string, 0, chunks)
+ for {
+ if end > length {
+ end = length
+ }
+ parts = append(parts, s[start:end])
+ if end == length {
+ break
+ }
+ start, end = end, end+chunkSize
+ }
+ return parts
+}
diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go
new file mode 100644
index 000000000..3ce5dcecb
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/watch/filechanges.go
@@ -0,0 +1,36 @@
+package watch
+
+type FileChanges struct {
+ Modified chan bool // Channel to get notified of modifications
+ Truncated chan bool // Channel to get notified of truncations
+ Deleted chan bool // Channel to get notified of deletions/renames
+}
+
+func NewFileChanges() *FileChanges {
+ return &FileChanges{
+ make(chan bool), make(chan bool), make(chan bool)}
+}
+
+func (fc *FileChanges) NotifyModified() {
+ sendOnlyIfEmpty(fc.Modified)
+}
+
+func (fc *FileChanges) NotifyTruncated() {
+ sendOnlyIfEmpty(fc.Truncated)
+}
+
+func (fc *FileChanges) NotifyDeleted() {
+ sendOnlyIfEmpty(fc.Deleted)
+}
+
+// sendOnlyIfEmpty sends on a bool channel only if the channel has no
+// backlog to be read by other goroutines. This concurrency pattern
+// can be used to notify other goroutines if and only if they are
+// looking for it (i.e., subsequent notifications can be compressed
+// into one).
+func sendOnlyIfEmpty(ch chan bool) {
+ select {
+ case ch <- true:
+ default:
+ }
+}
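+
+// For example (a sketch): because the channels in FileChanges are
+// unbuffered, a notification is delivered only when a goroutine is already
+// blocked receiving; otherwise it is dropped and coalesced into a later
+// successful send.
+//
+//	fc := NewFileChanges()
+//	go func() { <-fc.Modified }() // a consumer blocks receiving
+//	fc.NotifyModified() // delivered only if the consumer is already waiting
+//	fc.NotifyModified() // otherwise dropped (coalesced with a later notify)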
diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go
new file mode 100644
index 000000000..4478f1e1a
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/watch/inotify.go
@@ -0,0 +1,128 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/hpcloud/tail/util"
+
+ "gopkg.in/fsnotify.v1"
+ "gopkg.in/tomb.v1"
+)
+
+// InotifyFileWatcher uses inotify to monitor file changes.
+type InotifyFileWatcher struct {
+ Filename string
+ Size int64
+}
+
+func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
+ fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
+ return fw
+}
+
+func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
+ err := WatchCreate(fw.Filename)
+ if err != nil {
+ return err
+ }
+ defer RemoveWatchCreate(fw.Filename)
+
+	// Do a real check now as the file might have been created before
+	// calling `WatchCreate` above.
+ if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
+ // file exists, or stat returned an error.
+ return err
+ }
+
+ events := Events(fw.Filename)
+
+ for {
+ select {
+ case evt, ok := <-events:
+ if !ok {
+ return fmt.Errorf("inotify watcher has been closed")
+ }
+ evtName, err := filepath.Abs(evt.Name)
+ if err != nil {
+ return err
+ }
+ fwFilename, err := filepath.Abs(fw.Filename)
+ if err != nil {
+ return err
+ }
+ if evtName == fwFilename {
+ return nil
+ }
+ case <-t.Dying():
+ return tomb.ErrDying
+ }
+ }
+ panic("unreachable")
+}
+
+func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
+ err := Watch(fw.Filename)
+ if err != nil {
+ return nil, err
+ }
+
+ changes := NewFileChanges()
+ fw.Size = pos
+
+ go func() {
+ defer RemoveWatch(fw.Filename)
+
+ events := Events(fw.Filename)
+
+ for {
+ prevSize := fw.Size
+
+ var evt fsnotify.Event
+ var ok bool
+
+ select {
+ case evt, ok = <-events:
+ if !ok {
+ return
+ }
+ case <-t.Dying():
+ return
+ }
+
+ switch {
+ case evt.Op&fsnotify.Remove == fsnotify.Remove:
+ fallthrough
+
+ case evt.Op&fsnotify.Rename == fsnotify.Rename:
+ changes.NotifyDeleted()
+ return
+
+ case evt.Op&fsnotify.Write == fsnotify.Write:
+ fi, err := os.Stat(fw.Filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ changes.NotifyDeleted()
+ return
+ }
+ // XXX: report this error back to the user
+ util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
+ }
+ fw.Size = fi.Size()
+
+ if prevSize > 0 && prevSize > fw.Size {
+ changes.NotifyTruncated()
+ } else {
+ changes.NotifyModified()
+ }
+ prevSize = fw.Size
+ }
+ }
+ }()
+
+ return changes, nil
+}
diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
new file mode 100644
index 000000000..03be4275c
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
@@ -0,0 +1,260 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "log"
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+
+ "github.com/hpcloud/tail/util"
+
+ "gopkg.in/fsnotify.v1"
+)
+
+type InotifyTracker struct {
+ mux sync.Mutex
+ watcher *fsnotify.Watcher
+ chans map[string]chan fsnotify.Event
+ done map[string]chan bool
+ watchNums map[string]int
+ watch chan *watchInfo
+ remove chan *watchInfo
+ error chan error
+}
+
+type watchInfo struct {
+ op fsnotify.Op
+ fname string
+}
+
+func (winfo *watchInfo) isCreate() bool {
+	return winfo.op == fsnotify.Create
+}
+
+var (
+ // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
+ shared *InotifyTracker
+
+ // these are used to ensure the shared InotifyTracker is run exactly once
+ once = sync.Once{}
+ goRun = func() {
+ shared = &InotifyTracker{
+ mux: sync.Mutex{},
+ chans: make(map[string]chan fsnotify.Event),
+ done: make(map[string]chan bool),
+ watchNums: make(map[string]int),
+ watch: make(chan *watchInfo),
+ remove: make(chan *watchInfo),
+ error: make(chan error),
+ }
+ go shared.run()
+ }
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+// Watch signals the run goroutine to begin watching the input filename
+func Watch(fname string) error {
+ return watch(&watchInfo{
+ fname: fname,
+ })
+}
+
+// WatchCreate signals the run goroutine to begin watching for the creation
+// of the input filename. If you use WatchCreate, call RemoveWatchCreate
+// rather than Cleanup when you are done.
+func WatchCreate(fname string) error {
+ return watch(&watchInfo{
+ op: fsnotify.Create,
+ fname: fname,
+ })
+}
+
+func watch(winfo *watchInfo) error {
+ // start running the shared InotifyTracker if not already running
+ once.Do(goRun)
+
+ winfo.fname = filepath.Clean(winfo.fname)
+ shared.watch <- winfo
+ return <-shared.error
+}
+
+// RemoveWatch signals the run goroutine to remove the watch for the input filename
+func RemoveWatch(fname string) {
+ remove(&watchInfo{
+ fname: fname,
+ })
+}
+
+// RemoveWatchCreate signals the run goroutine to remove the creation watch for the input filename
+func RemoveWatchCreate(fname string) {
+ remove(&watchInfo{
+ op: fsnotify.Create,
+ fname: fname,
+ })
+}
+
+func remove(winfo *watchInfo) {
+ // start running the shared InotifyTracker if not already running
+ once.Do(goRun)
+
+ winfo.fname = filepath.Clean(winfo.fname)
+ shared.mux.Lock()
+ done := shared.done[winfo.fname]
+ if done != nil {
+ delete(shared.done, winfo.fname)
+ close(done)
+ }
+
+ fname := winfo.fname
+ if winfo.isCreate() {
+ // Watch for new files to be created in the parent directory.
+ fname = filepath.Dir(fname)
+ }
+ shared.watchNums[fname]--
+ watchNum := shared.watchNums[fname]
+ if watchNum == 0 {
+ delete(shared.watchNums, fname)
+ }
+ shared.mux.Unlock()
+
+ // If we were the last ones to watch this file, unsubscribe from inotify.
+ // This needs to happen after releasing the lock because fsnotify waits
+ // synchronously for the kernel to acknowledge the removal of the watch
+ // for this file, which causes us to deadlock if we still held the lock.
+ if watchNum == 0 {
+ shared.watcher.Remove(fname)
+ }
+ shared.remove <- winfo
+}
+
+// Events returns a channel to which fsnotify events for the input filename
+// will be sent. This channel will be closed when removeWatch is called on this
+// filename.
+func Events(fname string) <-chan fsnotify.Event {
+ shared.mux.Lock()
+ defer shared.mux.Unlock()
+
+ return shared.chans[fname]
+}
+
+// Cleanup removes the watch for the input filename if necessary.
+func Cleanup(fname string) {
+ RemoveWatch(fname)
+}
+
+// addWatch adds an fsnotify watch for the input filename (or for its parent
+// directory when watching for creation), reference-counting watches so each
+// path is registered with the underlying Watcher only once.
+func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
+ shared.mux.Lock()
+ defer shared.mux.Unlock()
+
+ if shared.chans[winfo.fname] == nil {
+ shared.chans[winfo.fname] = make(chan fsnotify.Event)
+ shared.done[winfo.fname] = make(chan bool)
+ }
+
+ fname := winfo.fname
+ if winfo.isCreate() {
+ // Watch for new files to be created in the parent directory.
+ fname = filepath.Dir(fname)
+ }
+
+ // already in inotify watch
+ if shared.watchNums[fname] > 0 {
+ shared.watchNums[fname]++
+ if winfo.isCreate() {
+ shared.watchNums[winfo.fname]++
+ }
+ return nil
+ }
+
+ err := shared.watcher.Add(fname)
+ if err == nil {
+ shared.watchNums[fname]++
+ if winfo.isCreate() {
+ shared.watchNums[winfo.fname]++
+ }
+ }
+ return err
+}
+
+// removeWatch closes the events channel corresponding to the input filename
+// and cleans up the tracking state for its watch.
+func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
+ shared.mux.Lock()
+ defer shared.mux.Unlock()
+
+ ch := shared.chans[winfo.fname]
+ if ch == nil {
+ return
+ }
+
+ delete(shared.chans, winfo.fname)
+ close(ch)
+
+ if !winfo.isCreate() {
+ return
+ }
+
+ shared.watchNums[winfo.fname]--
+ if shared.watchNums[winfo.fname] == 0 {
+ delete(shared.watchNums, winfo.fname)
+ }
+}
+
+// sendEvent sends the input event to the appropriate Tail.
+func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
+ name := filepath.Clean(event.Name)
+
+ shared.mux.Lock()
+ ch := shared.chans[name]
+ done := shared.done[name]
+ shared.mux.Unlock()
+
+ if ch != nil && done != nil {
+ select {
+ case ch <- event:
+ case <-done:
+ }
+ }
+}
+
+// run starts the goroutine in which the shared struct reads events from its
+// Watcher's Event channel and sends the events to the appropriate Tail.
+func (shared *InotifyTracker) run() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ util.Fatal("failed to create Watcher")
+ }
+ shared.watcher = watcher
+
+ for {
+ select {
+ case winfo := <-shared.watch:
+ shared.error <- shared.addWatch(winfo)
+
+ case winfo := <-shared.remove:
+ shared.removeWatch(winfo)
+
+ case event, open := <-shared.watcher.Events:
+ if !open {
+ return
+ }
+ shared.sendEvent(event)
+
+ case err, open := <-shared.watcher.Errors:
+ if !open {
+ return
+ } else if err != nil {
+ sysErr, ok := err.(*os.SyscallError)
+ if !ok || sysErr.Err != syscall.EINTR {
+ logger.Printf("Error in Watcher Error channel: %s", err)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go
new file mode 100644
index 000000000..49491f21d
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/watch/polling.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+ "os"
+ "runtime"
+ "time"
+
+ "github.com/hpcloud/tail/util"
+ "gopkg.in/tomb.v1"
+)
+
+// PollingFileWatcher polls the file for changes.
+type PollingFileWatcher struct {
+ Filename string
+ Size int64
+}
+
+func NewPollingFileWatcher(filename string) *PollingFileWatcher {
+ fw := &PollingFileWatcher{filename, 0}
+ return fw
+}
+
+var POLL_DURATION time.Duration
+
+func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
+ for {
+ if _, err := os.Stat(fw.Filename); err == nil {
+ return nil
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+ select {
+ case <-time.After(POLL_DURATION):
+ continue
+ case <-t.Dying():
+ return tomb.ErrDying
+ }
+ }
+ panic("unreachable")
+}
+
+func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
+ origFi, err := os.Stat(fw.Filename)
+ if err != nil {
+ return nil, err
+ }
+
+ changes := NewFileChanges()
+ var prevModTime time.Time
+
+ // XXX: use tomb.Tomb to cleanly manage these goroutines. replace
+ // the fatal (below) with tomb's Kill.
+
+ fw.Size = pos
+
+ go func() {
+ prevSize := fw.Size
+ for {
+ select {
+ case <-t.Dying():
+ return
+ default:
+ }
+
+ time.Sleep(POLL_DURATION)
+ fi, err := os.Stat(fw.Filename)
+ if err != nil {
+ // Windows cannot delete a file if a handle is still open (tail keeps one open)
+ // so it gives access denied to anything trying to read it until all handles are released.
+ if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
+ // File does not exist (has been deleted).
+ changes.NotifyDeleted()
+ return
+ }
+
+ // XXX: report this error back to the user
+ util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
+ }
+
+ // File got moved/renamed?
+ if !os.SameFile(origFi, fi) {
+ changes.NotifyDeleted()
+ return
+ }
+
+ // File got truncated?
+ fw.Size = fi.Size()
+ if prevSize > 0 && prevSize > fw.Size {
+ changes.NotifyTruncated()
+ prevSize = fw.Size
+ continue
+ }
+ // File got bigger?
+ if prevSize > 0 && prevSize < fw.Size {
+ changes.NotifyModified()
+ prevSize = fw.Size
+ continue
+ }
+ prevSize = fw.Size
+
+ // File was appended to (changed)?
+ modTime := fi.ModTime()
+ if modTime != prevModTime {
+ prevModTime = modTime
+ changes.NotifyModified()
+ }
+ }
+ }()
+
+ return changes, nil
+}
+
+func init() {
+ POLL_DURATION = 250 * time.Millisecond
+}
diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go
new file mode 100644
index 000000000..2e1783ef0
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/watch/watch.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import "gopkg.in/tomb.v1"
+
+// FileWatcher monitors file-level events.
+type FileWatcher interface {
+ // BlockUntilExists blocks until the file comes into existence.
+ BlockUntilExists(*tomb.Tomb) error
+
+ // ChangeEvents reports on changes to a file, be it modification,
+ // deletion, renames or truncations. Returned FileChanges group of
+ // channels will be closed, thus become unusable, after a deletion
+ // or truncation event.
+ // In order to properly report truncations, ChangeEvents requires
+ // the caller to pass their current offset in the file.
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
+}
diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/hpcloud/tail/winfile/winfile.go
new file mode 100644
index 000000000..aa7e7bc5d
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/winfile/winfile.go
@@ -0,0 +1,92 @@
+// +build windows
+
+package winfile
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// The issue is also described here:
+// https://codereview.appspot.com/8203043/
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
+func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
+ if len(path) == 0 {
+ return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
+ case syscall.O_RDONLY:
+ access = syscall.GENERIC_READ
+ case syscall.O_WRONLY:
+ access = syscall.GENERIC_WRITE
+ case syscall.O_RDWR:
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ }
+ if mode&syscall.O_CREAT != 0 {
+ access |= syscall.GENERIC_WRITE
+ }
+ if mode&syscall.O_APPEND != 0 {
+ access &^= syscall.GENERIC_WRITE
+ access |= syscall.FILE_APPEND_DATA
+ }
+ sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
+ var sa *syscall.SecurityAttributes
+ if mode&syscall.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
+ createmode = syscall.CREATE_NEW
+ case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
+ createmode = syscall.CREATE_ALWAYS
+ case mode&syscall.O_CREAT == syscall.O_CREAT:
+ createmode = syscall.OPEN_ALWAYS
+ case mode&syscall.O_TRUNC == syscall.O_TRUNC:
+ createmode = syscall.TRUNCATE_EXISTING
+ default:
+ createmode = syscall.OPEN_EXISTING
+ }
+ h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ return h, e
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
+func makeInheritSa() *syscall.SecurityAttributes {
+ var sa syscall.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
+func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
+ r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
+
+// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
+func syscallMode(i os.FileMode) (o uint32) {
+ o |= uint32(i.Perm())
+ if i&os.ModeSetuid != 0 {
+ o |= syscall.S_ISUID
+ }
+ if i&os.ModeSetgid != 0 {
+ o |= syscall.S_ISGID
+ }
+ if i&os.ModeSticky != 0 {
+ o |= syscall.S_ISVTX
+ }
+ // No mapping for Go's ModeTemporary (plan9 only).
+ return
+}
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 000000000..686680298
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 000000000..f4769cdd7
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,128 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
+
+![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
+
+## Status
+
+It is ready for production use, and has held up well after extensive use in the wild.
+
+[![Build Status][1]][2]
+[![GoDoc][3]][4]
+[![GoCard][5]][6]
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://goreportcard.com/badge/imdario/mergo
+[6]: https://goreportcard.com/report/github.com/imdario/mergo
+
+### Important note
+
+Mergo is intended to assign **only** zero-value fields on the destination from the corresponding source values. It has worked this way since April 6th 2015; before that it didn't work properly and could cause random overwrites. After some issues and PRs I found it didn't merge as I had designed it: thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8), overwriting functions were added and the wrong behavior was clearly detected.
+
+If you were using Mergo **before** April 6th 2015, please check that your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and for any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+
+### Mergo in the wild
+
+- [docker/docker](https://github.com/docker/docker/)
+- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes)
+- [imdario/zas](https://github.com/imdario/zas)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+
+## Installation
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs (with exported fields initialized to the zero value of their type) and same-type maps. Mergo won't merge unexported (private) fields, but it recurses into any exported one. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, a struct to a map), following the same restrictions as Merge(). Keys are capitalized to find the corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: mapping a struct to a map is not recursive. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will simply be assigned as values.
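+
+For example, a small sketch (the `Config` type here is hypothetical) of mapping a struct into a map, where only missing or zero-value keys are filled in:
+
+	type Config struct {
+		Host string
+		Port int
+	}
+
+	dst := map[string]interface{}{"host": "localhost"}
+	if err := mergo.Map(&dst, Config{Host: "example.com", Port: 8080}); err != nil {
+		// ...
+	}
+	// dst keeps "host": "localhost" (non-empty) and gains "port": 8080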
+
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+### Nice example
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ }
+
+ dest := Foo{
+ A: "two",
+ B: 2,
+ }
+
+ mergo.Merge(&dest, src)
+
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+ go get gopkg.in/yaml.v1
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 000000000..6e9aa7baf
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields but will recurse into any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+ type networkConfig struct {
+ Protocol string
+ Address string
+		ServerType string `json:"server_type"`
+ Port uint16
+ }
+
+ type FssnConfig struct {
+ Network networkConfig
+ }
+
+ var fssnDefault = FssnConfig {
+ networkConfig {
+ "tcp",
+ "127.0.0.1",
+ "http",
+ 31560,
+ },
+ }
+
+ // Inside a function [...]
+
+ if err := mergo.Merge(&config, fssnDefault); err != nil {
+ log.Fatal(err)
+ }
+
+ // More code [...]
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 000000000..8e8c4ba8e
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,156 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// deepMap recursively traverses both values, assigning src's field values
+// to dst. The map argument tracks comparisons that have already been seen,
+// which allows short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ } else {
+ if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields but will recurse into any
+// exported one.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match a field in dst will be skipped; this
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps
+// sane semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}) error {
+ return _map(dst, src, false)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+func MapWithOverwrite(dst, src interface{}) error {
+ return _map(dst, src, true)
+}
+
+func _map(dst, src interface{}, overwrite bool) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 000000000..11e55b1e2
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,123 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "reflect"
+)
+
+// deepMerge recursively traverses both values, assigning src's field values
+// to dst. The map argument tracks comparisons that have already been seen,
+// which allows short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ switch dst.Kind() {
+ case reflect.Struct:
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil {
+ return
+ }
+ }
+ case reflect.Map:
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ }
+ }
+ if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if src.IsNil() {
+ break
+ } else if dst.IsNil() {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil {
+ return
+ }
+ default:
+ if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ }
+ return
+}
+
+// Merge will fill any empty value-type attributes on the dst struct using the
+// corresponding src attributes, provided they themselves are not empty. dst and
+// src must be valid same-type structs and dst must be a pointer to struct.
+// It won't merge unexported (private) fields but will recurse into any exported field.
+func Merge(dst, src interface{}) error {
+ return merge(dst, src, false)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+func MergeWithOverwrite(dst, src interface{}) error {
+ return merge(dst, src, true)
+}
+
+func merge(dst, src interface{}, overwrite bool) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 000000000..f8a0991ec
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited nodes are stored in a map indexed by 17 * ptr.
+type visit struct {
+ ptr uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// From src/pkg/encoding/json.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The visited map tracks nodes that have already been seen, which allows
+// short-circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ return // TODO refactor
+}
diff --git a/vendor/github.com/juju/ratelimit/LICENSE b/vendor/github.com/juju/ratelimit/LICENSE
new file mode 100644
index 000000000..ade9307b3
--- /dev/null
+++ b/vendor/github.com/juju/ratelimit/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/vendor/github.com/juju/ratelimit/README.md b/vendor/github.com/juju/ratelimit/README.md
new file mode 100644
index 000000000..a0fdfe2b1
--- /dev/null
+++ b/vendor/github.com/juju/ratelimit/README.md
@@ -0,0 +1,117 @@
+# ratelimit
+--
+ import "github.com/juju/ratelimit"
+
+The ratelimit package provides an efficient token bucket implementation. See
+http://en.wikipedia.org/wiki/Token_bucket.
+
+## Usage
+
+#### func Reader
+
+```go
+func Reader(r io.Reader, bucket *Bucket) io.Reader
+```
+Reader returns a reader that is rate limited by the given token bucket. Each
+token in the bucket represents one byte.
+
+#### func Writer
+
+```go
+func Writer(w io.Writer, bucket *Bucket) io.Writer
+```
+Writer returns a writer that is rate limited by the given token bucket. Each
+token in the bucket represents one byte.
+
+#### type Bucket
+
+```go
+type Bucket struct {
+}
+```
+
+Bucket represents a token bucket that fills at a predetermined rate. Methods on
+Bucket may be called concurrently.
+
+#### func NewBucket
+
+```go
+func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
+```
+NewBucket returns a new token bucket that fills at the rate of one token every
+fillInterval, up to the given maximum capacity. Both arguments must be positive.
+The bucket is initially full.
+
+#### func NewBucketWithQuantum
+
+```go
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
+```
+NewBucketWithQuantum is similar to NewBucket, but allows the specification of
+the quantum size - quantum tokens are added every fillInterval.
+
+#### func NewBucketWithRate
+
+```go
+func NewBucketWithRate(rate float64, capacity int64) *Bucket
+```
+NewBucketWithRate returns a token bucket that fills the bucket at the rate of
+rate tokens per second up to the given maximum capacity. Because of limited
+clock resolution, at high rates, the actual rate may be up to 1% different from
+the specified rate.
+
+#### func (*Bucket) Rate
+
+```go
+func (tb *Bucket) Rate() float64
+```
+Rate returns the fill rate of the bucket, in tokens per second.
+
+#### func (*Bucket) Take
+
+```go
+func (tb *Bucket) Take(count int64) time.Duration
+```
+Take takes count tokens from the bucket without blocking. It returns the time
+that the caller should wait until the tokens are actually available.
+
+Note that the request is irrevocable - there is no way to return tokens to
+the bucket once this method commits us to taking them.
+
+#### func (*Bucket) TakeAvailable
+
+```go
+func (tb *Bucket) TakeAvailable(count int64) int64
+```
+TakeAvailable takes up to count immediately available tokens from the bucket. It
+returns the number of tokens removed, or zero if there are no available tokens.
+It does not block.
+
+#### func (*Bucket) TakeMaxDuration
+
+```go
+func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
+```
+TakeMaxDuration is like Take, except that it will only take tokens from the
+bucket if the wait time for the tokens is no greater than maxWait.
+
+If it would take longer than maxWait for the tokens to become available, it does
+nothing and reports false, otherwise it returns the time that the caller should
+wait until the tokens are actually available, and reports true.
+
+#### func (*Bucket) Wait
+
+```go
+func (tb *Bucket) Wait(count int64)
+```
+Wait takes count tokens from the bucket, waiting until they are available.
+
+#### func (*Bucket) WaitMaxDuration
+
+```go
+func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
+```
+WaitMaxDuration is like Wait except that it will only take tokens from the
+bucket if it needs to wait for no longer than maxWait. It reports whether any
+tokens have been removed from the bucket. If no tokens have been removed, it
+returns immediately.
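+
+#### Example
+
+A minimal sketch (illustrative, not part of the package documentation):
+copy stdin to stdout at roughly 100KB/s by wrapping a reader with a
+100,000 token/second bucket, one token per byte.
+
+```go
+package main
+
+import (
+ "io"
+ "os"
+
+ "github.com/juju/ratelimit"
+)
+
+func main() {
+ // Fill at 100,000 tokens/s with a burst capacity of 100,000 tokens.
+ bucket := ratelimit.NewBucketWithRate(100*1000, 100*1000)
+ io.Copy(os.Stdout, ratelimit.Reader(os.Stdin, bucket))
+}
+```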
diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go
new file mode 100644
index 000000000..1c3f25b2e
--- /dev/null
+++ b/vendor/github.com/juju/ratelimit/ratelimit.go
@@ -0,0 +1,284 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3 with static-linking exception.
+// See LICENCE file for details.
+
+// Package ratelimit provides an efficient token bucket implementation
+// that can be used to limit the rate of arbitrary things.
+// See http://en.wikipedia.org/wiki/Token_bucket.
+package ratelimit
+
+import (
+ "math"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// Bucket represents a token bucket that fills at a predetermined rate.
+// Methods on Bucket may be called concurrently.
+type Bucket struct {
+ startTime time.Time
+ capacity int64
+ quantum int64
+ fillInterval time.Duration
+ clock Clock
+
+ // The mutex guards the fields following it.
+ mu sync.Mutex
+
+ // avail holds the number of available tokens
+ // in the bucket, as of availTick ticks from startTime.
+ // It will be negative when there are consumers
+ // waiting for tokens.
+ avail int64
+ availTick int64
+}
+
+// Clock is used to inject testable fakes.
+type Clock interface {
+ Now() time.Time
+ Sleep(d time.Duration)
+}
+
+// realClock implements Clock in terms of standard time functions.
+type realClock struct{}
+
+// Now is identical to time.Now.
+func (realClock) Now() time.Time {
+ return time.Now()
+}
+
+// Sleep is identical to time.Sleep.
+func (realClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// NewBucket returns a new token bucket that fills at the
+// rate of one token every fillInterval, up to the given
+// maximum capacity. Both arguments must be
+// positive. The bucket is initially full.
+func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
+ return NewBucketWithClock(fillInterval, capacity, realClock{})
+}
+
+// NewBucketWithClock is identical to NewBucket but injects a testable clock
+// interface.
+func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket {
+ return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock)
+}
+
+// rateMargin specifies the allowed variance of actual
+// rate from specified rate. 1% seems reasonable.
+const rateMargin = 0.01
+
+// NewBucketWithRate returns a token bucket that fills the bucket
+// at the rate of rate tokens per second up to the given
+// maximum capacity. Because of limited clock resolution,
+// at high rates, the actual rate may be up to 1% different from the
+// specified rate.
+func NewBucketWithRate(rate float64, capacity int64) *Bucket {
+ return NewBucketWithRateAndClock(rate, capacity, realClock{})
+}
+
+// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
+// testable clock interface.
+func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
+ for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
+ fillInterval := time.Duration(1e9 * float64(quantum) / rate)
+ if fillInterval <= 0 {
+ continue
+ }
+ tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock)
+ if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
+ return tb
+ }
+ }
+ panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
+}
+
+// nextQuantum returns the next quantum to try after q.
+// We grow the quantum exponentially, but slowly, so we
+// get a good fit in the lower numbers.
+func nextQuantum(q int64) int64 {
+ q1 := q * 11 / 10
+ if q1 == q {
+ q1++
+ }
+ return q1
+}
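+
+// For example: nextQuantum(1) = 2, nextQuantum(10) = 11 and
+// nextQuantum(100) = 110; small quanta step by one, larger ones
+// grow by roughly 10%.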
+
+// NewBucketWithQuantum is similar to NewBucket, but allows
+// the specification of the quantum size - quantum tokens
+// are added every fillInterval.
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
+ return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{})
+}
+
+// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects
+// a testable clock interface.
+func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
+ if fillInterval <= 0 {
+ panic("token bucket fill interval is not > 0")
+ }
+ if capacity <= 0 {
+ panic("token bucket capacity is not > 0")
+ }
+ if quantum <= 0 {
+ panic("token bucket quantum is not > 0")
+ }
+ return &Bucket{
+ clock: clock,
+ startTime: clock.Now(),
+ capacity: capacity,
+ quantum: quantum,
+ avail: capacity,
+ fillInterval: fillInterval,
+ }
+}
+
+// Wait takes count tokens from the bucket, waiting until they are
+// available.
+func (tb *Bucket) Wait(count int64) {
+ if d := tb.Take(count); d > 0 {
+ tb.clock.Sleep(d)
+ }
+}
+
+// WaitMaxDuration is like Wait except that it will
+// only take tokens from the bucket if it needs to wait
+// for no longer than maxWait. It reports whether
+// any tokens have been removed from the bucket.
+// If no tokens have been removed, it returns immediately.
+func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
+ d, ok := tb.TakeMaxDuration(count, maxWait)
+ if d > 0 {
+ tb.clock.Sleep(d)
+ }
+ return ok
+}
+
+const infinityDuration time.Duration = 0x7fffffffffffffff
+
+// Take takes count tokens from the bucket without blocking. It returns
+// the time that the caller should wait until the tokens are actually
+// available.
+//
+// Note that the request is irrevocable - there is no way to return
+// tokens to the bucket once this method commits us to taking them.
+func (tb *Bucket) Take(count int64) time.Duration {
+ d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
+ return d
+}
+
+// TakeMaxDuration is like Take, except that
+// it will only take tokens from the bucket if the wait
+// time for the tokens is no greater than maxWait.
+//
+// If it would take longer than maxWait for the tokens
+// to become available, it does nothing and reports false,
+// otherwise it returns the time that the caller should
+// wait until the tokens are actually available, and reports
+// true.
+func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
+ return tb.take(tb.clock.Now(), count, maxWait)
+}
+
+// TakeAvailable takes up to count immediately available tokens from the
+// bucket. It returns the number of tokens removed, or zero if there are
+// no available tokens. It does not block.
+func (tb *Bucket) TakeAvailable(count int64) int64 {
+ return tb.takeAvailable(tb.clock.Now(), count)
+}
+
+// takeAvailable is the internal version of TakeAvailable - it takes the
+// current time as an argument to enable easy testing.
+func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
+ if count <= 0 {
+ return 0
+ }
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+
+ tb.adjust(now)
+ if tb.avail <= 0 {
+ return 0
+ }
+ if count > tb.avail {
+ count = tb.avail
+ }
+ tb.avail -= count
+ return count
+}
+
+// Available returns the number of available tokens. It will be negative
+// when there are consumers waiting for tokens. Note that if this
+// returns greater than zero, it does not guarantee that calls that take
+// tokens from the bucket will succeed, as the number of available
+// tokens could have changed in the meantime. This method is intended
+// primarily for metrics reporting and debugging.
+func (tb *Bucket) Available() int64 {
+ return tb.available(tb.clock.Now())
+}
+
+// available is the internal version of available - it takes the current time as
+// an argument to enable easy testing.
+func (tb *Bucket) available(now time.Time) int64 {
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+ tb.adjust(now)
+ return tb.avail
+}
+
+// Capacity returns the capacity that the bucket was created with.
+func (tb *Bucket) Capacity() int64 {
+ return tb.capacity
+}
+
+// Rate returns the fill rate of the bucket, in tokens per second.
+func (tb *Bucket) Rate() float64 {
+ return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
+}
+
+// take is the internal version of Take - it takes the current time as
+// an argument to enable easy testing.
+func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
+ if count <= 0 {
+ return 0, true
+ }
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+
+ currentTick := tb.adjust(now)
+ avail := tb.avail - count
+ if avail >= 0 {
+ tb.avail = avail
+ return 0, true
+ }
+ // Round up the missing tokens to the nearest multiple
+ // of quantum - the tokens won't be available until
+ // that tick.
+ endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
+ endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
+ waitTime := endTime.Sub(now)
+ if waitTime > maxWait {
+ return 0, false
+ }
+ tb.avail = avail
+ return waitTime, true
+}
+
+// adjust adjusts the current bucket capacity based on the current time.
+// It returns the current tick.
+func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
+ currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
+
+ if tb.avail >= tb.capacity {
+ return
+ }
+ tb.avail += (currentTick - tb.availTick) * tb.quantum
+ if tb.avail > tb.capacity {
+ tb.avail = tb.capacity
+ }
+ tb.availTick = currentTick
+ return
+}
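+
+// Worked example (illustrative): with fillInterval = 100ms, quantum = 2
+// and capacity = 10, Rate() returns 1e9*2/1e8 = 20 tokens per second.
+// When adjust runs 250ms after startTime, currentTick = 2 and
+// (currentTick-availTick)*quantum tokens are credited, with avail
+// capped at the capacity of 10.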
diff --git a/vendor/github.com/juju/ratelimit/reader.go b/vendor/github.com/juju/ratelimit/reader.go
new file mode 100644
index 000000000..6403bf78d
--- /dev/null
+++ b/vendor/github.com/juju/ratelimit/reader.go
@@ -0,0 +1,51 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3 with static-linking exception.
+// See LICENCE file for details.
+
+package ratelimit
+
+import "io"
+
+type reader struct {
+ r io.Reader
+ bucket *Bucket
+}
+
+// Reader returns a reader that is rate limited by
+// the given token bucket. Each token in the bucket
+// represents one byte.
+func Reader(r io.Reader, bucket *Bucket) io.Reader {
+ return &reader{
+ r: r,
+ bucket: bucket,
+ }
+}
+
+func (r *reader) Read(buf []byte) (int, error) {
+ n, err := r.r.Read(buf)
+ if n <= 0 {
+ return n, err
+ }
+ r.bucket.Wait(int64(n))
+ return n, err
+}
+
+type writer struct {
+ w io.Writer
+ bucket *Bucket
+}
+
+// Writer returns a writer that is rate limited by
+// the given token bucket. Each token in the bucket
+// represents one byte.
+func Writer(w io.Writer, bucket *Bucket) io.Writer {
+ return &writer{
+ w: w,
+ bucket: bucket,
+ }
+}
+
+func (w *writer) Write(buf []byte) (int, error) {
+ w.bucket.Wait(int64(len(buf)))
+ return w.w.Write(buf)
+}
diff --git a/vendor/github.com/kr/pty/License b/vendor/github.com/kr/pty/License
new file mode 100644
index 000000000..6b7558b6b
--- /dev/null
+++ b/vendor/github.com/kr/pty/License
@@ -0,0 +1,23 @@
+Copyright (c) 2011 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall
+be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/kr/pty/README.md b/vendor/github.com/kr/pty/README.md
new file mode 100644
index 000000000..7b7900c3a
--- /dev/null
+++ b/vendor/github.com/kr/pty/README.md
@@ -0,0 +1,36 @@
+# pty
+
+Pty is a Go package for using unix pseudo-terminals.
+
+## Install
+
+ go get github.com/kr/pty
+
+## Example
+
+```go
+package main
+
+import (
+ "github.com/kr/pty"
+ "io"
+ "os"
+ "os/exec"
+)
+
+func main() {
+ c := exec.Command("grep", "--color=auto", "bar")
+ f, err := pty.Start(c)
+ if err != nil {
+ panic(err)
+ }
+
+ go func() {
+ f.Write([]byte("foo\n"))
+ f.Write([]byte("bar\n"))
+ f.Write([]byte("baz\n"))
+ f.Write([]byte{4}) // EOT
+ }()
+ io.Copy(os.Stdout, f)
+}
+```
diff --git a/vendor/github.com/kr/pty/doc.go b/vendor/github.com/kr/pty/doc.go
new file mode 100644
index 000000000..190cfbea9
--- /dev/null
+++ b/vendor/github.com/kr/pty/doc.go
@@ -0,0 +1,16 @@
+// Package pty provides functions for working with Unix terminals.
+package pty
+
+import (
+ "errors"
+ "os"
+)
+
+// ErrUnsupported is returned if a function is not
+// available on the current platform.
+var ErrUnsupported = errors.New("unsupported")
+
+// Open opens a pty and its corresponding tty.
+func Open() (pty, tty *os.File, err error) {
+ return open()
+}
diff --git a/vendor/github.com/kr/pty/ioctl.go b/vendor/github.com/kr/pty/ioctl.go
new file mode 100644
index 000000000..c57c19e7e
--- /dev/null
+++ b/vendor/github.com/kr/pty/ioctl.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package pty
+
+import "syscall"
+
+func ioctl(fd, cmd, ptr uintptr) error {
+ _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr)
+ if e != 0 {
+ return e
+ }
+ return nil
+}
diff --git a/vendor/github.com/kr/pty/ioctl_bsd.go b/vendor/github.com/kr/pty/ioctl_bsd.go
new file mode 100644
index 000000000..73b12c53c
--- /dev/null
+++ b/vendor/github.com/kr/pty/ioctl_bsd.go
@@ -0,0 +1,39 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package pty
+
+// from <sys/ioccom.h>
+const (
+ _IOC_VOID uintptr = 0x20000000
+ _IOC_OUT uintptr = 0x40000000
+ _IOC_IN uintptr = 0x80000000
+ _IOC_IN_OUT uintptr = _IOC_OUT | _IOC_IN
+ _IOC_DIRMASK = _IOC_VOID | _IOC_OUT | _IOC_IN
+
+ _IOC_PARAM_SHIFT = 13
+ _IOC_PARAM_MASK = (1 << _IOC_PARAM_SHIFT) - 1
+)
+
+func _IOC_PARM_LEN(ioctl uintptr) uintptr {
+ return (ioctl >> 16) & _IOC_PARAM_MASK
+}
+
+func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num
+}
+
+func _IO(group byte, ioctl_num uintptr) uintptr {
+ return _IOC(_IOC_VOID, group, ioctl_num, 0)
+}
+
+func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return _IOC(_IOC_OUT, group, ioctl_num, param_len)
+}
+
+func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return _IOC(_IOC_IN, group, ioctl_num, param_len)
+}
+
+func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len)
+}
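+
+// For illustration (hypothetical parameters): _IOW('f', 120, 8) yields
+// _IOC_IN | (8&_IOC_PARAM_MASK)<<16 | 'f'<<8 | 120
+// = 0x80000000 | 0x00080000 | 0x6600 | 0x78 = 0x80086678,
+// i.e. a "write" ioctl in group 'f', number 120, with an 8-byte parameter.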
diff --git a/vendor/github.com/kr/pty/pty_darwin.go b/vendor/github.com/kr/pty/pty_darwin.go
new file mode 100644
index 000000000..4f4d5ca26
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_darwin.go
@@ -0,0 +1,60 @@
+package pty
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func open() (pty, tty *os.File, err error) {
+ p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sname, err := ptsname(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = grantpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = unlockpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t, err := os.OpenFile(sname, os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return p, t, nil
+}
+
+func ptsname(f *os.File) (string, error) {
+ n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME))
+
+ err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0])))
+ if err != nil {
+ return "", err
+ }
+
+ for i, c := range n {
+ if c == 0 {
+ return string(n[:i]), nil
+ }
+ }
+ return "", errors.New("TIOCPTYGNAME string not NUL-terminated")
+}
+
+func grantpt(f *os.File) error {
+ return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0)
+}
+
+func unlockpt(f *os.File) error {
+ return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0)
+}
diff --git a/vendor/github.com/kr/pty/pty_dragonfly.go b/vendor/github.com/kr/pty/pty_dragonfly.go
new file mode 100644
index 000000000..5431fb5ae
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_dragonfly.go
@@ -0,0 +1,76 @@
+package pty
+
+import (
+ "errors"
+ "os"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+// same code as pty_darwin.go
+func open() (pty, tty *os.File, err error) {
+ p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sname, err := ptsname(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = grantpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = unlockpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t, err := os.OpenFile(sname, os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return p, t, nil
+}
+
+func grantpt(f *os.File) error {
+ _, err := isptmaster(f.Fd())
+ return err
+}
+
+func unlockpt(f *os.File) error {
+ _, err := isptmaster(f.Fd())
+ return err
+}
+
+func isptmaster(fd uintptr) (bool, error) {
+ err := ioctl(fd, syscall.TIOCISPTMASTER, 0)
+ return err == nil, err
+}
+
+var (
+ emptyFiodgnameArg fiodgnameArg
+ ioctl_FIODNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg))
+)
+
+func ptsname(f *os.File) (string, error) {
+ name := make([]byte, _C_SPECNAMELEN)
+ fa := fiodgnameArg{Name: (*byte)(unsafe.Pointer(&name[0])), Len: _C_SPECNAMELEN, Pad_cgo_0: [4]byte{0, 0, 0, 0}}
+
+ err := ioctl(f.Fd(), ioctl_FIODNAME, uintptr(unsafe.Pointer(&fa)))
+ if err != nil {
+ return "", err
+ }
+
+ for i, c := range name {
+ if c == 0 {
+ s := "/dev/" + string(name[:i])
+ return strings.Replace(s, "ptm", "pts", -1), nil
+ }
+ }
+ return "", errors.New("TIOCPTYGNAME string not NUL-terminated")
+}
diff --git a/vendor/github.com/kr/pty/pty_freebsd.go b/vendor/github.com/kr/pty/pty_freebsd.go
new file mode 100644
index 000000000..b341babd0
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_freebsd.go
@@ -0,0 +1,73 @@
+package pty
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func posix_openpt(oflag int) (fd int, err error) {
+ r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func open() (pty, tty *os.File, err error) {
+ fd, err := posix_openpt(syscall.O_RDWR | syscall.O_CLOEXEC)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ p := os.NewFile(uintptr(fd), "/dev/pts")
+ sname, err := ptsname(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return p, t, nil
+}
+
+func isptmaster(fd uintptr) (bool, error) {
+ err := ioctl(fd, syscall.TIOCPTMASTER, 0)
+ return err == nil, err
+}
+
+var (
+ emptyFiodgnameArg fiodgnameArg
+ ioctl_FIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg))
+)
+
+func ptsname(f *os.File) (string, error) {
+ master, err := isptmaster(f.Fd())
+ if err != nil {
+ return "", err
+ }
+ if !master {
+ return "", syscall.EINVAL
+ }
+
+ const n = _C_SPECNAMELEN + 1
+ var (
+ buf = make([]byte, n)
+ arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))}
+ )
+ err = ioctl(f.Fd(), ioctl_FIODGNAME, uintptr(unsafe.Pointer(&arg)))
+ if err != nil {
+ return "", err
+ }
+
+ for i, c := range buf {
+ if c == 0 {
+ return string(buf[:i]), nil
+ }
+ }
+ return "", errors.New("FIODGNAME string not NUL-terminated")
+}
diff --git a/vendor/github.com/kr/pty/pty_linux.go b/vendor/github.com/kr/pty/pty_linux.go
new file mode 100644
index 000000000..cb901a21e
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_linux.go
@@ -0,0 +1,46 @@
+package pty
+
+import (
+ "os"
+ "strconv"
+ "syscall"
+ "unsafe"
+)
+
+func open() (pty, tty *os.File, err error) {
+ p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sname, err := ptsname(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = unlockpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return p, t, nil
+}
+
+func ptsname(f *os.File) (string, error) {
+ var n _C_uint
+ err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n)))
+ if err != nil {
+ return "", err
+ }
+ return "/dev/pts/" + strconv.Itoa(int(n)), nil
+}
+
+func unlockpt(f *os.File) error {
+ var u _C_int
+ // use TIOCSPTLCK with a zero valued arg to clear the slave pty lock
+ return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
+}
diff --git a/vendor/github.com/kr/pty/pty_unsupported.go b/vendor/github.com/kr/pty/pty_unsupported.go
new file mode 100644
index 000000000..bd3d1e7e0
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!darwin,!freebsd,!dragonfly
+
+package pty
+
+import (
+ "os"
+)
+
+func open() (pty, tty *os.File, err error) {
+ return nil, nil, ErrUnsupported
+}
diff --git a/vendor/github.com/kr/pty/run.go b/vendor/github.com/kr/pty/run.go
new file mode 100644
index 000000000..baecca8af
--- /dev/null
+++ b/vendor/github.com/kr/pty/run.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package pty
+
+import (
+ "os"
+ "os/exec"
+ "syscall"
+)
+
+// Start assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout,
+// and c.Stderr, calls c.Start, and returns the File of the tty's
+// corresponding pty.
+func Start(c *exec.Cmd) (pty *os.File, err error) {
+ pty, tty, err := Open()
+ if err != nil {
+ return nil, err
+ }
+ defer tty.Close()
+ c.Stdout = tty
+ c.Stdin = tty
+ c.Stderr = tty
+ if c.SysProcAttr == nil {
+ c.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ c.SysProcAttr.Setctty = true
+ c.SysProcAttr.Setsid = true
+ err = c.Start()
+ if err != nil {
+ pty.Close()
+ return nil, err
+ }
+ return pty, err
+}
diff --git a/vendor/github.com/kr/pty/util.go b/vendor/github.com/kr/pty/util.go
new file mode 100644
index 000000000..a4fab9a7c
--- /dev/null
+++ b/vendor/github.com/kr/pty/util.go
@@ -0,0 +1,37 @@
+// +build !windows
+
+package pty
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// Getsize returns the number of rows (lines) and cols (positions
+// in each line) in terminal t.
+func Getsize(t *os.File) (rows, cols int, err error) {
+ var ws winsize
+ err = windowrect(&ws, t.Fd())
+ return int(ws.ws_row), int(ws.ws_col), err
+}
+
+type winsize struct {
+ ws_row uint16
+ ws_col uint16
+ ws_xpixel uint16
+ ws_ypixel uint16
+}
+
+func windowrect(ws *winsize, fd uintptr) error {
+ _, _, errno := syscall.Syscall(
+ syscall.SYS_IOCTL,
+ fd,
+ syscall.TIOCGWINSZ,
+ uintptr(unsafe.Pointer(ws)),
+ )
+ if errno != 0 {
+ return syscall.Errno(errno)
+ }
+ return nil
+}
diff --git a/vendor/github.com/kr/pty/ztypes_386.go b/vendor/github.com/kr/pty/ztypes_386.go
new file mode 100644
index 000000000..ff0b8fd83
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_386.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_amd64.go b/vendor/github.com/kr/pty/ztypes_amd64.go
new file mode 100644
index 000000000..ff0b8fd83
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_amd64.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_arm.go b/vendor/github.com/kr/pty/ztypes_arm.go
new file mode 100644
index 000000000..ff0b8fd83
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_arm.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_arm64.go b/vendor/github.com/kr/pty/ztypes_arm64.go
new file mode 100644
index 000000000..6c29a4b91
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_arm64.go
@@ -0,0 +1,11 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+// +build arm64
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go b/vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go
new file mode 100644
index 000000000..6b0ba037f
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go
@@ -0,0 +1,14 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_dragonfly.go
+
+package pty
+
+const (
+ _C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+ Name *byte
+ Len uint32
+ Pad_cgo_0 [4]byte
+}
diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_386.go b/vendor/github.com/kr/pty/ztypes_freebsd_386.go
new file mode 100644
index 000000000..d9975374e
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_freebsd_386.go
@@ -0,0 +1,13 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+ _C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+ Len int32
+ Buf *byte
+}
diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go b/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go
new file mode 100644
index 000000000..5fa102fcd
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go
@@ -0,0 +1,14 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+ _C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+ Len int32
+ Pad_cgo_0 [4]byte
+ Buf *byte
+}
diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_arm.go b/vendor/github.com/kr/pty/ztypes_freebsd_arm.go
new file mode 100644
index 000000000..d9975374e
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_freebsd_arm.go
@@ -0,0 +1,13 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+ _C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+ Len int32
+ Buf *byte
+}
diff --git a/vendor/github.com/kr/pty/ztypes_mipsx.go b/vendor/github.com/kr/pty/ztypes_mipsx.go
new file mode 100644
index 000000000..f0ce74086
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_mipsx.go
@@ -0,0 +1,12 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+// +build linux
+// +build mips mipsle mips64 mips64le
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_ppc64.go b/vendor/github.com/kr/pty/ztypes_ppc64.go
new file mode 100644
index 000000000..4e1af8431
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_ppc64.go
@@ -0,0 +1,11 @@
+// +build ppc64
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_ppc64le.go b/vendor/github.com/kr/pty/ztypes_ppc64le.go
new file mode 100644
index 000000000..e6780f4e2
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_ppc64le.go
@@ -0,0 +1,11 @@
+// +build ppc64le
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_s390x.go b/vendor/github.com/kr/pty/ztypes_s390x.go
new file mode 100644
index 000000000..a7452b61c
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_s390x.go
@@ -0,0 +1,11 @@
+// +build s390x
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE
new file mode 100644
index 000000000..fbff658f7
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2016 Mail.Ru Group
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md
new file mode 100644
index 000000000..e990111fc
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/README.md
@@ -0,0 +1,193 @@
+# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson)
+
+easyjson allows (un-)marshalling of JSON to and from Go structs without the use of reflection by generating marshaller code.
+
+One of the aims of the library is to keep the generated code simple enough that it can be easily optimized or fixed. Another goal is to provide users with the ability to customize the generated code in ways not available in 'encoding/json', such as generating snake_case names or enabling 'omitempty' behavior by default.
+
+## usage
+```
+go get github.com/mailru/easyjson/...
+easyjson -all <file>.go
+```
+
+This will generate `<file>_easyjson.go` with marshaller/unmarshaller methods for structs. `GOPATH` variable needs to be set up correctly, since the generation invokes a `go run` on a temporary file (this is a really convenient approach to code generation borrowed from https://github.com/pquerna/ffjson).
+
+## options
+```
+Usage of .root/bin/easyjson:
+ -all
+ generate un-/marshallers for all structs in a file
+ -build_tags string
+ build tags to add to generated file
+ -leave_temps
+ do not delete temporary files
+ -no_std_marshalers
+ don't generate MarshalJSON/UnmarshalJSON methods
+ -noformat
+ do not run 'gofmt -w' on output file
+ -omit_empty
+ omit empty fields by default
+ -snake_case
+ use snake_case names instead of CamelCase by default
+ -stubs
+ only generate stubs for marshallers/unmarshallers methods
+```
+
+Using `-all` will generate (un-)marshallers for all structs in the file. By default, structs need to have a line beginning with `easyjson:json` in their docstring, e.g.:
+```
+//easyjson:json
+struct A{}
+```
+
+`-snake_case` tells easyjson to generate snake\_case field names by default (unless explicitly overridden by a field tag). The CamelCase to snake\_case conversion algorithm should work in most cases (e.g. HTTPVersion will be converted to http_version). There can be names like JSONHTTPRPC where the conversion will return an unexpected result (jsonhttprpc without underscores), but such names require a dictionary to do the conversion and may be ambiguous.
+
+`-build_tags` will add corresponding build tag line for the generated file.
+## marshaller/unmarshaller interfaces
+
+easyjson generates MarshalJSON/UnmarshalJSON methods that are compatible with the interfaces from 'encoding/json'. They are usable with the 'json.Marshal' and 'json.Unmarshal' functions; however, actually using those will result in significantly worse performance compared to the custom interfaces.
+
+`MarshalEasyJSON` / `UnmarshalEasyJSON` methods are generated for faster parsing using custom Lexer/Writer structs (`jlexer.Lexer` and `jwriter.Writer`). The method signatures are defined in the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces. These interfaces make it possible to avoid unnecessary reflection or type assertions during parsing. The functions can be used manually or through the `easyjson.Marshal<...>` and `easyjson.Unmarshal<...>` helper methods.
+
+In addition to a function that returns the data as a single slice, the `jwriter.Writer` struct has methods to return the size and to send the data to an `io.Writer`. This is aimed at a typical HTTP use-case, where you want to know the `Content-Length` before actually starting to send the data.
+
+There are helpers in the top-level package for marshaling/unmarshaling the data using the custom interfaces to and from writers, including a helper for `http.ResponseWriter`.
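+
+A minimal sketch (the `Order` type and its fields are made up for illustration) of using the generated methods through the helpers:
+
+```go
+//easyjson:json
+type Order struct {
+ ID int `json:"id"`
+ Message string `json:"message,omitempty"`
+}
+
+// After `easyjson -all order.go` has generated the methods:
+data, err := easyjson.Marshal(Order{ID: 1}) // calls MarshalEasyJSON internally
+if err == nil {
+ var o Order
+ err = easyjson.Unmarshal(data, &o) // calls UnmarshalEasyJSON internally
+}
+```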
+
+## custom types
+If `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces are implemented by a type involved in JSON parsing, the type will be marshaled/unmarshaled using these methods. `easyjson.Optional` interface allows for a custom type to integrate with 'omitempty' logic.
+
+As an example, easyjson includes an `easyjson.RawMessage` analogous to `json.RawMessage`.
+
+Also, there are 'optional' wrappers for primitive types in the `easyjson/opt` package. These are useful when it is necessary to distinguish between a missing and a default value for the type. The wrappers make it possible to avoid pointers and extra heap allocations in such cases.
+
+## memory pooling
+
+The library uses a custom buffer which allocates data in increasing chunks (128-32768 bytes). Chunks of 512 bytes and larger are reused with the help of `sync.Pool`. The maximum size of a chunk is bounded to reduce redundancy in memory allocation and to make the chunks more reusable in the case of large buffer sizes.
+
+The buffer code is in the `easyjson/buffer` package; the exact values can be tweaked by a `buffer.Init()` call before the first serialization.
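+
+For example, a sketch of tuning the pool (the values are illustrative, not recommendations):
+
+```go
+import "github.com/mailru/easyjson/buffer"
+
+func init() {
+ buffer.Init(buffer.PoolConfig{
+ StartSize: 512, // minimum chunk size that is allocated
+ PooledSize: 1024, // minimum chunk size that is reused via sync.Pool
+ MaxSize: 65536, // maximum chunk size that is allocated
+ })
+}
+```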
+
+## limitations
+* The library is at an early stage; there are likely to be some bugs, and some features of 'encoding/json' may not be supported. Please report such cases, so that they may be fixed sooner.
+* Object keys are case-sensitive (unlike encoding/json). Case-insensitive behavior will be implemented as an option (case-insensitive matching is slower).
+* The unsafe package is used by the code. While a non-unsafe version of easyjson can be made in the future, using the unsafe package simplifies a lot of code by allowing no-copy []byte to string conversion within the library. This is used only during parsing, and all the returned values are allocated properly.
+* Floats are currently formatted with default precision for 'strconv' package. It is obvious that it is not always the correct way to handle it, but there aren't enough use-cases for floats at hand to do anything better.
+* During parsing, parts of JSON that are skipped over are not syntactically validated more than required to skip matching parentheses.
+* No true streaming support for encoding/decoding. For many use-cases and protocols, data length is typically known on input and needs to be known before sending the data.
+
+## benchmarks
+Most benchmarks were done using a sample 13kB JSON (9k if serialized back trimming the whitespace) from https://dev.twitter.com/rest/reference/get/search/tweets. The sample is very close to real-world data, quite structured and contains a variety of different types.
+
+For small request benchmarks, an 80-byte portion of the regular sample was used.
+
+For large request marshalling benchmarks, a struct containing 50 regular samples was used, making a ~500kB output JSON.
+
+Benchmarks are available in the repository and are run on 'make'.
+
+### easyjson vs. encoding/json
+
+easyjson seems to be 5-6 times faster than the default json serialization for unmarshalling, 3-4 times faster for non-concurrent marshalling. Concurrent marshalling is 6-7x faster if marshalling to a writer.
+
+### easyjson vs. ffjson
+
+easyjson uses the same approach for code generation as ffjson, but a significantly different approach to lexing and generated code. This allows easyjson to be 2-3x faster for unmarshalling and 1.5-2x faster for non-concurrent marshalling.
+
+ffjson seems to behave weirdly when used concurrently: for large requests, pooling hurts performance instead of boosting it, and it also does not scale well. These issues are likely fixable, and until then comparisons may vary a lot from version to version.
+
+easyjson is similar in performance for small requests and 2-5x times faster for large ones if used with a writer.
+
+### easyjson vs. go/codec
+
+github.com/ugorji/go/codec library provides compile-time helpers for JSON generation. In this case, helpers are not exactly marshallers as they are encoding-independent.
+
+easyjson is generally ~2x faster for non-concurrent benchmarks and about 3x faster for concurrent encoding (without marshalling to a writer). Unsafe option for generated helpers was used.
+
+As an attempt to measure the marshalling performance of 'go/codec' (as opposed to allocations/memcpy/writer interface invocations), a benchmark was done with resetting the length of a byte slice rather than resetting the whole slice to nil. However, the optimization in this exact form may not be applicable in practice, since the memory is not freed between marshalling operations.
+
+### easyjson vs 'ujson' python module
+ujson uses C code for parsing, so it is interesting to see how plain Go compares to that. It is important to note that the resulting object for Python is slower to access, since the library parses JSON objects into dictionaries.
+
+easyjson seems to be slightly faster for unmarshalling (finally!) and 2-3x faster for marshalling.
+
+### benchmark figures
+The data was measured on 4 February, 2016 using current ffjson and golang 1.6. Data for go/codec was added on 4 March 2016, benchmarked on the same machine.
+
+#### Unmarshalling
+| lib | json size | MB/s | allocs/op | B/op
+|--------|-----------|------|-----------|-------
+|standard| regular | 22 | 218 | 10229
+|standard| small | 9.7 | 14 | 720
+|--------|-----------|------|-----------|-------
+|easyjson| regular | 125 | 128 | 9794
+|easyjson| small | 67 | 3 | 128
+|--------|-----------|------|-----------|-------
+|ffjson | regular | 66 | 141 | 9985
+|ffjson | small | 17.6 | 10 | 488
+|--------|-----------|------|-----------|-------
+|codec | regular | 55 | 434 | 19299
+|codec | small | 29 | 7 | 336
+|--------|-----------|------|-----------|-------
+|ujson | regular | 103 | N/A | N/A
+
+#### Marshalling, one goroutine.
+| lib | json size | MB/s | allocs/op | B/op
+|----------|-----------|------|-----------|-------
+|standard | regular | 75 | 9 | 23256
+|standard | small | 32 | 3 | 328
+|standard | large | 80 | 17 | 1.2M
+|----------|-----------|------|-----------|-------
+|easyjson | regular | 213 | 9 | 10260
+|easyjson* | regular | 263 | 8 | 742
+|easyjson | small | 125 | 1 | 128
+|easyjson | large | 212 | 33 | 490k
+|easyjson* | large | 262 | 25 | 2879
+|----------|-----------|------|-----------|-------
+|ffjson | regular | 122 | 153 | 21340
+|ffjson** | regular | 146 | 152 | 4897
+|ffjson | small | 36 | 5 | 384
+|ffjson** | small | 64 | 4 | 128
+|ffjson | large | 134 | 7317 | 818k
+|ffjson** | large | 125 | 7320 | 827k
+|----------|-----------|------|-----------|-------
+|codec | regular | 80 | 17 | 33601
+|codec*** | regular | 108 | 9 | 1153
+|codec | small | 42 | 3 | 304
+|codec*** | small | 56 | 1 | 48
+|codec | large | 73 | 483 | 2.5M
+|codec*** | large | 103 | 451 | 66007
+|----------|-----------|------|-----------|-------
+|ujson | regular | 92 | N/A | N/A
+\* marshalling to a writer,
+\*\* using `ffjson.Pool()`,
+\*\*\* reusing output slice instead of resetting it to nil
+
+#### Marshalling, concurrent.
+| lib | json size | MB/s | allocs/op | B/op
+|----------|-----------|-------|-----------|-------
+|standard | regular | 252 | 9 | 23257
+|standard | small | 124 | 3 | 328
+|standard | large | 289 | 17 | 1.2M
+|----------|-----------|-------|-----------|-------
+|easyjson | regular | 792 | 9 | 10597
+|easyjson* | regular | 1748 | 8 | 779
+|easyjson | small | 333 | 1 | 128
+|easyjson | large | 718 | 36 | 548k
+|easyjson* | large | 2134 | 25 | 4957
+|----------|-----------|------|-----------|-------
+|ffjson | regular | 301 | 153 | 21629
+|ffjson** | regular | 707 | 152 | 5148
+|ffjson | small | 62 | 5 | 384
+|ffjson** | small | 282 | 4 | 128
+|ffjson | large | 438 | 7330 | 1.0M
+|ffjson** | large | 131 | 7319 | 820k
+|----------|-----------|------|-----------|-------
+|codec | regular | 183 | 17 | 33603
+|codec*** | regular | 671 | 9 | 1157
+|codec | small | 147 | 3 | 304
+|codec*** | small | 299 | 1 | 48
+|codec | large | 190 | 483 | 2.5M
+|codec*** | large | 752 | 451 | 77574
+\* marshalling to a writer,
+\*\* using `ffjson.Pool()`,
+\*\*\* reusing output slice instead of resetting it to nil
+
+
+
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go
new file mode 100644
index 000000000..4de4a51db
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/buffer/pool.go
@@ -0,0 +1,207 @@
+// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
+// reduce copying and to allow reuse of individual chunks.
+package buffer
+
+import (
+ "io"
+ "sync"
+)
+
+// PoolConfig contains configuration for the allocation and reuse strategy.
+type PoolConfig struct {
+ StartSize int // Minimum chunk size that is allocated.
+ PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
+ MaxSize int // Maximum chunk size that will be allocated.
+}
+
+var config = PoolConfig{
+ StartSize: 128,
+ PooledSize: 512,
+ MaxSize: 32768,
+}
+
+// Reuse pool: chunk size -> pool.
+var buffers = map[int]*sync.Pool{}
+
+func initBuffers() {
+ for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
+ buffers[l] = new(sync.Pool)
+ }
+}
+
+func init() {
+ initBuffers()
+}
+
+// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
+func Init(cfg PoolConfig) {
+ config = cfg
+ initBuffers()
+}
+
+// putBuf puts a chunk to reuse pool if it can be reused.
+func putBuf(buf []byte) {
+ size := cap(buf)
+ if size < config.PooledSize {
+ return
+ }
+ if c := buffers[size]; c != nil {
+ c.Put(buf[:0])
+ }
+}
+
+// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
+func getBuf(size int) []byte {
+ if size < config.PooledSize {
+ return make([]byte, 0, size)
+ }
+
+ if c := buffers[size]; c != nil {
+ v := c.Get()
+ if v != nil {
+ return v.([]byte)
+ }
+ }
+ return make([]byte, 0, size)
+}
+
+// Buffer is a buffer optimized for serialization without extra copying.
+type Buffer struct {
+
+ // Buf is the current chunk that can be used for serialization.
+ Buf []byte
+
+ toPool []byte
+ bufs [][]byte
+}
+
+// EnsureSpace makes sure that the current chunk contains at least s free bytes,
+// possibly creating a new chunk.
+func (b *Buffer) EnsureSpace(s int) {
+ if cap(b.Buf)-len(b.Buf) >= s {
+ return
+ }
+ l := len(b.Buf)
+ if l > 0 {
+ if cap(b.toPool) != cap(b.Buf) {
+ // Chunk was reallocated, toPool can be pooled.
+ putBuf(b.toPool)
+ }
+ if cap(b.bufs) == 0 {
+ b.bufs = make([][]byte, 0, 8)
+ }
+ b.bufs = append(b.bufs, b.Buf)
+ l = cap(b.toPool) * 2
+ } else {
+ l = config.StartSize
+ }
+
+ if l > config.MaxSize {
+ l = config.MaxSize
+ }
+ b.Buf = getBuf(l)
+ b.toPool = b.Buf
+}
+
+// AppendByte appends a single byte to buffer.
+func (b *Buffer) AppendByte(data byte) {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+ b.Buf = append(b.Buf, data)
+}
+
+// AppendBytes appends a byte slice to buffer.
+func (b *Buffer) AppendBytes(data []byte) {
+ for len(data) > 0 {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+
+ sz := cap(b.Buf) - len(b.Buf)
+ if sz > len(data) {
+ sz = len(data)
+ }
+
+ b.Buf = append(b.Buf, data[:sz]...)
+ data = data[sz:]
+ }
+}
+
+// AppendString appends a string to buffer.
+func (b *Buffer) AppendString(data string) {
+ for len(data) > 0 {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+
+ sz := cap(b.Buf) - len(b.Buf)
+ if sz > len(data) {
+ sz = len(data)
+ }
+
+ b.Buf = append(b.Buf, data[:sz]...)
+ data = data[sz:]
+ }
+}
+
+// Size computes the size of the buffer by summing the sizes of every chunk.
+func (b *Buffer) Size() int {
+ size := len(b.Buf)
+ for _, buf := range b.bufs {
+ size += len(buf)
+ }
+ return size
+}
+
+// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
+func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
+ var n int
+ for _, buf := range b.bufs {
+ if err == nil {
+ n, err = w.Write(buf)
+ written += n
+ }
+ putBuf(buf)
+ }
+
+ if err == nil {
+ n, err = w.Write(b.Buf)
+ written += n
+ }
+ putBuf(b.toPool)
+
+ b.bufs = nil
+ b.Buf = nil
+ b.toPool = nil
+
+ return
+}
+
+// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
+// copied if it does not fit in a single chunk.
+func (b *Buffer) BuildBytes() []byte {
+ if len(b.bufs) == 0 {
+
+ ret := b.Buf
+ b.toPool = nil
+ b.Buf = nil
+
+ return ret
+ }
+
+ ret := make([]byte, 0, b.Size())
+ for _, buf := range b.bufs {
+ ret = append(ret, buf...)
+ putBuf(buf)
+ }
+
+ ret = append(ret, b.Buf...)
+ putBuf(b.toPool)
+
+ b.bufs = nil
+ b.toPool = nil
+ b.Buf = nil
+
+ return ret
+}
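A minimal usage sketch of this buffer; the sizes passed to Init are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	// Optionally replace the default pooling strategy; Init must run
	// before any serialization. Chunk pools are keyed by powers of two
	// from PooledSize up to MaxSize.
	buffer.Init(buffer.PoolConfig{
		StartSize:  256,
		PooledSize: 1024,
		MaxSize:    65536,
	})

	var b buffer.Buffer
	b.AppendString("hello, ")
	b.AppendBytes([]byte("world"))
	b.AppendByte('!')

	fmt.Println(b.Size())               // 13
	fmt.Println(string(b.BuildBytes())) // "hello, world!" (resets the buffer)
}
```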
diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go
new file mode 100644
index 000000000..e90ec40d0
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/error.go
@@ -0,0 +1,15 @@
+package jlexer
+
+import "fmt"
+
+// LexerError implements the error interface and represents all possible errors that can be
+// generated while parsing JSON data.
+type LexerError struct {
+ Reason string
+ Offset int
+ Data string
+}
+
+func (l *LexerError) Error() string {
+ return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
new file mode 100644
index 000000000..db4939d5a
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -0,0 +1,1121 @@
+// Package jlexer contains a JSON lexer implementation.
+//
+// It is expected that it is mostly used with generated parser code, so the interface is tuned
+// for a parser that knows what kind of data is expected.
+package jlexer
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+ "unsafe"
+)
+
+// tokenKind determines type of a token.
+type tokenKind byte
+
+const (
+ tokenUndef tokenKind = iota // No token.
+ tokenDelim // Delimiter: one of '{', '}', '[' or ']'.
+ tokenString // A string literal, e.g. "abc\u1234"
+ tokenNumber // Number literal, e.g. 1.5e5
+ tokenBool // Boolean literal: true or false.
+ tokenNull // null keyword.
+)
+
+// token describes a single token: type, position in the input and value.
+type token struct {
+ kind tokenKind // Type of a token.
+
+ boolValue bool // Value if a boolean literal token.
+ byteValue []byte // Raw value of a token.
+ delimValue byte // Raw character value if a delimiter token.
+}
+
+// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
+type Lexer struct {
+ Data []byte // Input data given to the lexer.
+
+ start int // Start of the current token.
+ pos int // Current unscanned position in the input stream.
+ token token // Last scanned token, if token.kind != tokenUndef.
+
+ firstElement bool // Whether the current element is the first in an array or an object.
+ wantSep byte // A comma or colon character that needs to occur before the next token.
+
+ UseMultipleErrors bool // Whether to collect multiple errors instead of stopping at the first one.
+ fatalError error // Fatal error occurred during lexing. It is usually a syntax error.
+ multipleErrors []*LexerError // Semantic errors occurred during lexing. Parsing will be continued after finding these errors.
+}
+
+// FetchToken scans the input for the next token.
+func (r *Lexer) FetchToken() {
+ r.token.kind = tokenUndef
+ r.start = r.pos
+
+ // Check that r.Data still has an element at r.pos;
+ // if it doesn't, the input data is corrupted.
+ if len(r.Data) < r.pos {
+ r.errParse("Unexpected end of data")
+ return
+ }
+ // Determine the type of a token by skipping whitespace and reading the
+ // first character.
+ for _, c := range r.Data[r.pos:] {
+ switch c {
+ case ':', ',':
+ if r.wantSep == c {
+ r.pos++
+ r.start++
+ r.wantSep = 0
+ } else {
+ r.errSyntax()
+ }
+
+ case ' ', '\t', '\r', '\n':
+ r.pos++
+ r.start++
+
+ case '"':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenString
+ r.fetchString()
+ return
+
+ case '{', '[':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+ r.firstElement = true
+ r.token.kind = tokenDelim
+ r.token.delimValue = r.Data[r.pos]
+ r.pos++
+ return
+
+ case '}', ']':
+ if !r.firstElement && (r.wantSep != ',') {
+ r.errSyntax()
+ }
+ r.wantSep = 0
+ r.token.kind = tokenDelim
+ r.token.delimValue = r.Data[r.pos]
+ r.pos++
+ return
+
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+ r.token.kind = tokenNumber
+ r.fetchNumber()
+ return
+
+ case 'n':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenNull
+ r.fetchNull()
+ return
+
+ case 't':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenBool
+ r.token.boolValue = true
+ r.fetchTrue()
+ return
+
+ case 'f':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenBool
+ r.token.boolValue = false
+ r.fetchFalse()
+ return
+
+ default:
+ r.errSyntax()
+ return
+ }
+ }
+ r.fatalError = io.EOF
+ return
+}
+
+// isTokenEnd returns true if the char can follow a non-delimiter token
+func isTokenEnd(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
+}
+
+// fetchNull fetches and checks remaining bytes of null keyword.
+func (r *Lexer) fetchNull() {
+ r.pos += 4
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-3] != 'u' ||
+ r.Data[r.pos-2] != 'l' ||
+ r.Data[r.pos-1] != 'l' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 4
+ r.errSyntax()
+ }
+}
+
+// fetchTrue fetches and checks remaining bytes of true keyword.
+func (r *Lexer) fetchTrue() {
+ r.pos += 4
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-3] != 'r' ||
+ r.Data[r.pos-2] != 'u' ||
+ r.Data[r.pos-1] != 'e' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 4
+ r.errSyntax()
+ }
+}
+
+// fetchFalse fetches and checks remaining bytes of false keyword.
+func (r *Lexer) fetchFalse() {
+ r.pos += 5
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-4] != 'a' ||
+ r.Data[r.pos-3] != 'l' ||
+ r.Data[r.pos-2] != 's' ||
+ r.Data[r.pos-1] != 'e' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 5
+ r.errSyntax()
+ }
+}
+
+// bytesToStr creates a string pointing at the slice to avoid copying.
+//
+// Warning: the returned string must be used with care, as a single string may keep the whole
+// input data chunk from being freed by the GC, and conversely the buffer.Data may be
+// garbage-collected even while the string still exists.
+func bytesToStr(data []byte) string {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+ shdr := reflect.StringHeader{h.Data, h.Len}
+ return *(*string)(unsafe.Pointer(&shdr))
+}
+
+// fetchNumber scans a number literal token.
+func (r *Lexer) fetchNumber() {
+ hasE := false
+ afterE := false
+ hasDot := false
+
+ r.pos++
+ for i, c := range r.Data[r.pos:] {
+ switch {
+ case c >= '0' && c <= '9':
+ afterE = false
+ case c == '.' && !hasDot:
+ hasDot = true
+ case (c == 'e' || c == 'E') && !hasE:
+ hasE = true
+ hasDot = true
+ afterE = true
+ case (c == '+' || c == '-') && afterE:
+ afterE = false
+ default:
+ r.pos += i
+ if !isTokenEnd(c) {
+ r.errSyntax()
+ } else {
+ r.token.byteValue = r.Data[r.start:r.pos]
+ }
+ return
+ }
+ }
+
+ r.pos = len(r.Data)
+ r.token.byteValue = r.Data[r.start:]
+}
+
+// findStringLen scans the string literal for the ending quote char to determine the required size.
+// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
+func findStringLen(data []byte) (hasEscapes bool, length int) {
+ delta := 0
+
+ for i := 0; i < len(data); i++ {
+ switch data[i] {
+ case '\\':
+ i++
+ delta++
+ if i < len(data) && data[i] == 'u' {
+ delta++
+ }
+ case '"':
+ return (delta > 0), (i - delta)
+ }
+ }
+
+ return false, len(data)
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or -1 if the sequence is invalid.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ var val rune
+ for i := 2; i < len(s) && i < 6; i++ {
+ var v byte
+ c := s[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ v = c - '0'
+ case 'a', 'b', 'c', 'd', 'e', 'f':
+ v = c - 'a' + 10
+ case 'A', 'B', 'C', 'D', 'E', 'F':
+ v = c - 'A' + 10
+ default:
+ return -1
+ }
+
+ val <<= 4
+ val |= rune(v)
+ }
+ return val
+}
+
+// processEscape processes a single escape sequence and returns number of bytes processed.
+func (r *Lexer) processEscape(data []byte) (int, error) {
+ if len(data) < 2 {
+ return 0, fmt.Errorf("syntax error at %v", string(data))
+ }
+
+ c := data[1]
+ switch c {
+ case '"', '/', '\\':
+ r.token.byteValue = append(r.token.byteValue, c)
+ return 2, nil
+ case 'b':
+ r.token.byteValue = append(r.token.byteValue, '\b')
+ return 2, nil
+ case 'f':
+ r.token.byteValue = append(r.token.byteValue, '\f')
+ return 2, nil
+ case 'n':
+ r.token.byteValue = append(r.token.byteValue, '\n')
+ return 2, nil
+ case 'r':
+ r.token.byteValue = append(r.token.byteValue, '\r')
+ return 2, nil
+ case 't':
+ r.token.byteValue = append(r.token.byteValue, '\t')
+ return 2, nil
+ case 'u':
+ rr := getu4(data)
+ if rr < 0 {
+ return 0, errors.New("syntax error")
+ }
+
+ read := 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(data[read:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ read += 6
+ rr = dec
+ } else {
+ rr = unicode.ReplacementChar
+ }
+ }
+ var d [4]byte
+ s := utf8.EncodeRune(d[:], rr)
+ r.token.byteValue = append(r.token.byteValue, d[:s]...)
+ return read, nil
+ }
+
+ return 0, errors.New("syntax error")
+}
+
+// fetchString scans a string literal token.
+func (r *Lexer) fetchString() {
+ r.pos++
+ data := r.Data[r.pos:]
+
+ hasEscapes, length := findStringLen(data)
+ if !hasEscapes {
+ r.token.byteValue = data[:length]
+ r.pos += length + 1
+ return
+ }
+
+ r.token.byteValue = make([]byte, 0, length)
+ p := 0
+ for i := 0; i < len(data); {
+ switch data[i] {
+ case '"':
+ r.pos += i + 1
+ r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+ i++
+ return
+
+ case '\\':
+ r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+ off, err := r.processEscape(data[i:])
+ if err != nil {
+ r.errParse(err.Error())
+ return
+ }
+ i += off
+ p = i
+
+ default:
+ i++
+ }
+ }
+ r.errParse("unterminated string literal")
+}
+
+// scanToken scans the next token if no token is currently available in the lexer.
+func (r *Lexer) scanToken() {
+ if r.token.kind != tokenUndef || r.fatalError != nil {
+ return
+ }
+
+ r.FetchToken()
+}
+
+// consume resets the current token to allow scanning the next one.
+func (r *Lexer) consume() {
+ r.token.kind = tokenUndef
+ r.token.delimValue = 0
+}
+
+// Ok returns true if no error (including io.EOF) was encountered during scanning.
+func (r *Lexer) Ok() bool {
+ return r.fatalError == nil
+}
+
+const maxErrorContextLen = 13
+
+func (r *Lexer) errParse(what string) {
+ if r.fatalError == nil {
+ var str string
+ if len(r.Data)-r.pos <= maxErrorContextLen {
+ str = string(r.Data)
+ } else {
+ str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
+ }
+ r.fatalError = &LexerError{
+ Reason: what,
+ Offset: r.pos,
+ Data: str,
+ }
+ }
+}
+
+func (r *Lexer) errSyntax() {
+ r.errParse("syntax error")
+}
+
+func (r *Lexer) errInvalidToken(expected string) {
+ if r.fatalError != nil {
+ return
+ }
+ if r.UseMultipleErrors {
+ r.pos = r.start
+ r.consume()
+ r.SkipRecursive()
+ switch expected {
+ case "[":
+ r.token.delimValue = ']'
+ r.token.kind = tokenDelim
+ case "{":
+ r.token.delimValue = '}'
+ r.token.kind = tokenDelim
+ }
+ r.addNonfatalError(&LexerError{
+ Reason: fmt.Sprintf("expected %s", expected),
+ Offset: r.start,
+ Data: string(r.Data[r.start:r.pos]),
+ })
+ return
+ }
+
+ var str string
+ if len(r.token.byteValue) <= maxErrorContextLen {
+ str = string(r.token.byteValue)
+ } else {
+ str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
+ }
+ r.fatalError = &LexerError{
+ Reason: fmt.Sprintf("expected %s", expected),
+ Offset: r.pos,
+ Data: str,
+ }
+}
+
+// GetPos returns the current scanning position in the input data.
+func (r *Lexer) GetPos() int {
+ return r.pos
+}
+
+// Delim consumes a token and verifies that it is the given delimiter.
+func (r *Lexer) Delim(c byte) {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+
+ if !r.Ok() || r.token.delimValue != c {
+ r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
+ r.errInvalidToken(string([]byte{c}))
+ } else {
+ r.consume()
+ }
+}
+
+// IsDelim returns true if there was no scanning error and next token is the given delimiter.
+func (r *Lexer) IsDelim(c byte) bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ return !r.Ok() || r.token.delimValue == c
+}
+
+// Null verifies that the next token is null and consumes it.
+func (r *Lexer) Null() {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenNull {
+ r.errInvalidToken("null")
+ }
+ r.consume()
+}
+
+// IsNull returns true if the next token is a null keyword.
+func (r *Lexer) IsNull() bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ return r.Ok() && r.token.kind == tokenNull
+}
+
+// Skip skips a single token.
+func (r *Lexer) Skip() {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ r.consume()
+}
+
+// SkipRecursive skips next array or object completely, or just skips a single token if not
+// an array/object.
+//
+// Note: no syntax validation is performed on the skipped data.
+func (r *Lexer) SkipRecursive() {
+ r.scanToken()
+ var start, end byte
+
+ if r.token.delimValue == '{' {
+ start, end = '{', '}'
+ } else if r.token.delimValue == '[' {
+ start, end = '[', ']'
+ } else {
+ r.consume()
+ return
+ }
+
+ r.consume()
+
+ level := 1
+ inQuotes := false
+ wasEscape := false
+
+ for i, c := range r.Data[r.pos:] {
+ switch {
+ case c == start && !inQuotes:
+ level++
+ case c == end && !inQuotes:
+ level--
+ if level == 0 {
+ r.pos += i + 1
+ return
+ }
+ case c == '\\' && inQuotes:
+ wasEscape = !wasEscape
+ continue
+ case c == '"' && inQuotes:
+ inQuotes = wasEscape
+ case c == '"':
+ inQuotes = true
+ }
+ wasEscape = false
+ }
+ r.pos = len(r.Data)
+ r.fatalError = &LexerError{
+ Reason: "EOF reached while skipping array/object or token",
+ Offset: r.pos,
+ Data: string(r.Data[r.pos:]),
+ }
+}
+
+// Raw fetches the next item recursively as a data slice
+func (r *Lexer) Raw() []byte {
+ r.SkipRecursive()
+ if !r.Ok() {
+ return nil
+ }
+ return r.Data[r.start:r.pos]
+}
+
+// IsStart returns whether the lexer is positioned at the start
+// of an input string.
+func (r *Lexer) IsStart() bool {
+ return r.pos == 0
+}
+
+// Consumed reads all remaining bytes from the input, publishing an error if
+// there is anything but whitespace remaining.
+func (r *Lexer) Consumed() {
+ if r.pos > len(r.Data) {
+ return
+ }
+
+ for _, c := range r.Data[r.pos:] {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ r.fatalError = &LexerError{
+ Reason: "invalid character '" + string(c) + "' after top-level value",
+ Offset: r.pos,
+ Data: string(r.Data[r.pos:]),
+ }
+ return
+ }
+
+ r.pos++
+ r.start++
+ }
+}
+
+func (r *Lexer) unsafeString() (string, []byte) {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return "", nil
+ }
+ bytes := r.token.byteValue
+ ret := bytesToStr(r.token.byteValue)
+ r.consume()
+ return ret, bytes
+}
+
+// UnsafeString returns the string value if the token is a string literal.
+//
+// Warning: returned string may point to the input buffer, so the string should not outlive
+// the input buffer. Intended pattern of usage is as an argument to a switch statement.
+func (r *Lexer) UnsafeString() string {
+ ret, _ := r.unsafeString()
+ return ret
+}
+
+// String reads a string literal.
+func (r *Lexer) String() string {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return ""
+ }
+ ret := string(r.token.byteValue)
+ r.consume()
+ return ret
+}
+
+// Bytes reads a string literal and base64 decodes it into a byte slice.
+func (r *Lexer) Bytes() []byte {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return nil
+ }
+ ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
+ n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
+ if err != nil {
+ r.fatalError = &LexerError{
+ Reason: err.Error(),
+ }
+ return nil
+ }
+
+ r.consume()
+ return ret[:n]
+}
+
+// Bool reads a true or false boolean keyword.
+func (r *Lexer) Bool() bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenBool {
+ r.errInvalidToken("bool")
+ return false
+ }
+ ret := r.token.boolValue
+ r.consume()
+ return ret
+}
+
+func (r *Lexer) number() string {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenNumber {
+ r.errInvalidToken("number")
+ return ""
+ }
+ ret := bytesToStr(r.token.byteValue)
+ r.consume()
+ return ret
+}
+
+func (r *Lexer) Uint8() uint8 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint8(n)
+}
+
+func (r *Lexer) Uint16() uint16 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint16(n)
+}
+
+func (r *Lexer) Uint32() uint32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint32(n)
+}
+
+func (r *Lexer) Uint64() uint64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Uint() uint {
+ return uint(r.Uint64())
+}
+
+func (r *Lexer) Int8() int8 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int8(n)
+}
+
+func (r *Lexer) Int16() int16 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int16(n)
+}
+
+func (r *Lexer) Int32() int32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int32(n)
+}
+
+func (r *Lexer) Int64() int64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Int() int {
+ return int(r.Int64())
+}
+
+func (r *Lexer) Uint8Str() uint8 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint8(n)
+}
+
+func (r *Lexer) Uint16Str() uint16 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint16(n)
+}
+
+func (r *Lexer) Uint32Str() uint32 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint32(n)
+}
+
+func (r *Lexer) Uint64Str() uint64 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return n
+}
+
+func (r *Lexer) UintStr() uint {
+ return uint(r.Uint64Str())
+}
+
+func (r *Lexer) Int8Str() int8 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int8(n)
+}
+
+func (r *Lexer) Int16Str() int16 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int16(n)
+}
+
+func (r *Lexer) Int32Str() int32 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int32(n)
+}
+
+func (r *Lexer) Int64Str() int64 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return n
+}
+
+func (r *Lexer) IntStr() int {
+ return int(r.Int64Str())
+}
+
+func (r *Lexer) Float32() float32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return float32(n)
+}
+
+func (r *Lexer) Float64() float64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Error() error {
+ return r.fatalError
+}
+
+// AddError sets e as the fatal error if no fatal error has been recorded yet.
+func (r *Lexer) AddError(e error) {
+ if r.fatalError == nil {
+ r.fatalError = e
+ }
+}
+
+// AddNonFatalError records an error at the current token; it is collected when
+// UseMultipleErrors is set and becomes fatal otherwise.
+func (r *Lexer) AddNonFatalError(e error) {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Data: string(r.Data[r.start:r.pos]),
+ Reason: e.Error(),
+ })
+}
+
+func (r *Lexer) addNonfatalError(err *LexerError) {
+ if r.UseMultipleErrors {
+ // We don't want to add errors with the same offset.
+ if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
+ return
+ }
+ r.multipleErrors = append(r.multipleErrors, err)
+ return
+ }
+ r.fatalError = err
+}
+
+// GetNonFatalErrors returns the errors collected in UseMultipleErrors mode.
+func (r *Lexer) GetNonFatalErrors() []*LexerError {
+ return r.multipleErrors
+}
+
+// Interface decodes the next JSON value into an interface{}, analogous to the 'encoding/json' package.
+func (r *Lexer) Interface() interface{} {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+
+ if !r.Ok() {
+ return nil
+ }
+ switch r.token.kind {
+ case tokenString:
+ return r.String()
+ case tokenNumber:
+ return r.Float64()
+ case tokenBool:
+ return r.Bool()
+ case tokenNull:
+ r.Null()
+ return nil
+ }
+
+ if r.token.delimValue == '{' {
+ r.consume()
+
+ ret := map[string]interface{}{}
+ for !r.IsDelim('}') {
+ key := r.String()
+ r.WantColon()
+ ret[key] = r.Interface()
+ r.WantComma()
+ }
+ r.Delim('}')
+
+ if r.Ok() {
+ return ret
+ } else {
+ return nil
+ }
+ } else if r.token.delimValue == '[' {
+ r.consume()
+
+ var ret []interface{}
+ for !r.IsDelim(']') {
+ ret = append(ret, r.Interface())
+ r.WantComma()
+ }
+ r.Delim(']')
+
+ if r.Ok() {
+ return ret
+ } else {
+ return nil
+ }
+ }
+ r.errSyntax()
+ return nil
+}
+
+// WantComma requires a comma to be present before fetching next token.
+func (r *Lexer) WantComma() {
+ r.wantSep = ','
+ r.firstElement = false
+}
+
+// WantColon requires a colon to be present before fetching next token.
+func (r *Lexer) WantColon() {
+ r.wantSep = ':'
+ r.firstElement = false
+}
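To illustrate the pull-style API above with a hand-written parser loop (the JSON document and field names are arbitrary): generated parsers wrap typed reads in Delim/WantColon/WantComma calls and check Error() once at the end.

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	l := jlexer.Lexer{Data: []byte(`{"id": 42, "name": "foo"}`)}

	l.Delim('{')
	for !l.IsDelim('}') {
		key := l.UnsafeString() // safe here: only used within this iteration
		l.WantColon()
		switch key {
		case "id":
			fmt.Println("id =", l.Int())
		case "name":
			fmt.Println("name =", l.String())
		default:
			l.SkipRecursive() // skip values of unknown keys
		}
		l.WantComma()
	}
	l.Delim('}')
	l.Consumed() // fail if anything but whitespace remains

	if err := l.Error(); err != nil {
		fmt.Println("parse failed:", err)
	}
}
```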
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
new file mode 100644
index 000000000..04bfddf70
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -0,0 +1,302 @@
+// Package jwriter contains a JSON writer.
+package jwriter
+
+import (
+ "encoding/base64"
+ "io"
+ "strconv"
+ "unicode/utf8"
+
+ "github.com/mailru/easyjson/buffer"
+)
+
+// Flags describe various encoding options. The behavior may actually be implemented in the
+// encoder, but the Flags field in Writer is used to set and pass them around.
+type Flags int
+
+const (
+ NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
+ NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
+)
+
+// Writer is a JSON writer.
+type Writer struct {
+ Flags Flags
+
+ Error error
+ Buffer buffer.Buffer
+ NoEscapeHTML bool
+}
+
+// Size returns the size of the data that was written out.
+func (w *Writer) Size() int {
+ return w.Buffer.Size()
+}
+
+// DumpTo outputs the data to given io.Writer, resetting the buffer.
+func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
+ return w.Buffer.DumpTo(out)
+}
+
+// BuildBytes returns writer data as a single byte slice.
+func (w *Writer) BuildBytes() ([]byte, error) {
+ if w.Error != nil {
+ return nil, w.Error
+ }
+
+ return w.Buffer.BuildBytes(), nil
+}
+
+// RawByte appends raw binary data to the buffer.
+func (w *Writer) RawByte(c byte) {
+ w.Buffer.AppendByte(c)
+}
+
+// RawString appends a string to the buffer.
+func (w *Writer) RawString(s string) {
+ w.Buffer.AppendString(s)
+}
+
+// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
+// calling with results of MarshalJSON-like functions.
+func (w *Writer) Raw(data []byte, err error) {
+ switch {
+ case w.Error != nil:
+ return
+ case err != nil:
+ w.Error = err
+ case len(data) > 0:
+ w.Buffer.AppendBytes(data)
+ default:
+ w.RawString("null")
+ }
+}
+
+// Base64Bytes appends data to the buffer after base64-encoding it.
+func (w *Writer) Base64Bytes(data []byte) {
+ if data == nil {
+ w.Buffer.AppendString("null")
+ return
+ }
+ w.Buffer.AppendByte('"')
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
+ base64.StdEncoding.Encode(dst, data)
+ w.Buffer.AppendBytes(dst)
+ w.Buffer.AppendByte('"')
+}
+
+func (w *Writer) Uint8(n uint8) {
+ w.Buffer.EnsureSpace(3)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint16(n uint16) {
+ w.Buffer.EnsureSpace(5)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint32(n uint32) {
+ w.Buffer.EnsureSpace(10)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint(n uint) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint64(n uint64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Int8(n int8) {
+ w.Buffer.EnsureSpace(4)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int16(n int16) {
+ w.Buffer.EnsureSpace(6)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int32(n int32) {
+ w.Buffer.EnsureSpace(11)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int(n int) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int64(n int64) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Uint8Str(n uint8) {
+ w.Buffer.EnsureSpace(3)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint16Str(n uint16) {
+ w.Buffer.EnsureSpace(5)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint32Str(n uint32) {
+ w.Buffer.EnsureSpace(10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintStr(n uint) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint64Str(n uint64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int8Str(n int8) {
+ w.Buffer.EnsureSpace(4)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int16Str(n int16) {
+ w.Buffer.EnsureSpace(6)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int32Str(n int32) {
+ w.Buffer.EnsureSpace(11)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) IntStr(n int) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int64Str(n int64) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float32(n float32) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+}
+
+func (w *Writer) Float64(n float64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
+}
+
+func (w *Writer) Bool(v bool) {
+ w.Buffer.EnsureSpace(5)
+ if v {
+ w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
+ } else {
+ w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
+ }
+}
+
+const chars = "0123456789abcdef"
+
+func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
+ // Note: might make sense to use a table if there are more chars to escape. With 4 chars
+ // it benchmarks the same.
+ if escapeHTML {
+ return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+ } else {
+ return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+ }
+}
+
+func (w *Writer) String(s string) {
+ w.Buffer.AppendByte('"')
+
+ // Portions of the string that contain no escapes are appended as
+ // byte slices.
+
+ p := 0 // last non-escape symbol
+
+ for i := 0; i < len(s); {
+ c := s[i]
+
+ if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
+ // single-width character, no escaping is required
+ i++
+ continue
+ } else if c < utf8.RuneSelf {
+ // single-width character, needs escaping
+ w.Buffer.AppendString(s[p:i])
+ switch c {
+ case '\t':
+ w.Buffer.AppendString(`\t`)
+ case '\r':
+ w.Buffer.AppendString(`\r`)
+ case '\n':
+ w.Buffer.AppendString(`\n`)
+ case '\\':
+ w.Buffer.AppendString(`\\`)
+ case '"':
+ w.Buffer.AppendString(`\"`)
+ default:
+ w.Buffer.AppendString(`\u00`)
+ w.Buffer.AppendByte(chars[c>>4])
+ w.Buffer.AppendByte(chars[c&0xf])
+ }
+
+ i++
+ p = i
+ continue
+ }
+
+ // broken UTF-8 sequence
+ runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
+ if runeValue == utf8.RuneError && runeWidth == 1 {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\ufffd`)
+ i++
+ p = i
+ continue
+ }
+
+ // JSONP compatibility: escape the line separator (U+2028) and paragraph separator (U+2029)
+ if runeValue == '\u2028' || runeValue == '\u2029' {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\u202`)
+ w.Buffer.AppendByte(chars[runeValue&0xf])
+ i += runeWidth
+ p = i
+ continue
+ }
+ i += runeWidth
+ }
+ w.Buffer.AppendString(s[p:])
+ w.Buffer.AppendByte('"')
+}
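A companion sketch for the writer: String performs JSON escaping, Base64Bytes encodes []byte the way encoding/json does, and BuildBytes assembles the final slice (field names are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jwriter"
)

func main() {
	w := jwriter.Writer{}

	w.RawByte('{')
	w.String("payload") // written as "payload"
	w.RawByte(':')
	w.Base64Bytes([]byte{0xde, 0xad}) // written as "3q0="
	w.RawByte(',')
	w.String("n")
	w.RawByte(':')
	w.Int64(-7)
	w.RawByte('}')

	out, err := w.BuildBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"payload":"3q0=","n":-7}
}
```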
diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.mkd
new file mode 100644
index 000000000..ffb0edd2c
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/README.mkd
@@ -0,0 +1,26 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
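Beyond StringWidth, the package exposes width-aware truncation and padding (defined in runewidth.go below); a couple of self-checking examples:

```go
runewidth.Truncate("abcdefgh", 5, "...") == "ab..."
runewidth.FillLeft("abc", 5) == "  abc"
```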
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 000000000..3fbf33d59
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,464 @@
+package runewidth
+
+// EastAsianWidth reports whether the current environment was detected as East Asian at init time.
+var EastAsianWidth = IsEastAsian()
+
+// DefaultCondition is the package-wide Condition derived from EastAsianWidth.
+var DefaultCondition = &Condition{EastAsianWidth}
+
+type interval struct {
+ first rune
+ last rune
+}
+
+var combining = []interval{
+ {0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489},
+ {0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2},
+ {0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0600, 0x0603},
+ {0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670},
+ {0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED},
+ {0x070F, 0x070F}, {0x0711, 0x0711}, {0x0730, 0x074A},
+ {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, {0x0901, 0x0902},
+ {0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D},
+ {0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981},
+ {0x09BC, 0x09BC}, {0x09C1, 0x09C4}, {0x09CD, 0x09CD},
+ {0x09E2, 0x09E3}, {0x0A01, 0x0A02}, {0x0A3C, 0x0A3C},
+ {0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
+ {0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC},
+ {0x0AC1, 0x0AC5}, {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD},
+ {0x0AE2, 0x0AE3}, {0x0B01, 0x0B01}, {0x0B3C, 0x0B3C},
+ {0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D},
+ {0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0},
+ {0x0BCD, 0x0BCD}, {0x0C3E, 0x0C40}, {0x0C46, 0x0C48},
+ {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0CBC, 0x0CBC},
+ {0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD},
+ {0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D},
+ {0x0DCA, 0x0DCA}, {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6},
+ {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E},
+ {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
+ {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35},
+ {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F71, 0x0F7E},
+ {0x0F80, 0x0F84}, {0x0F86, 0x0F87}, {0x0F90, 0x0F97},
+ {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030},
+ {0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039},
+ {0x1058, 0x1059}, {0x1160, 0x11FF}, {0x135F, 0x135F},
+ {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753},
+ {0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD},
+ {0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD},
+ {0x180B, 0x180D}, {0x18A9, 0x18A9}, {0x1920, 0x1922},
+ {0x1927, 0x1928}, {0x1932, 0x1932}, {0x1939, 0x193B},
+ {0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34},
+ {0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42},
+ {0x1B6B, 0x1B73}, {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF},
+ {0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x2063},
+ {0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F},
+ {0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B},
+ {0xA825, 0xA826}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F},
+ {0xFE20, 0xFE23}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB},
+ {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F},
+ {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169},
+ {0x1D173, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+ {0x1D242, 0x1D244}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+ {0xE0100, 0xE01EF},
+}
+
+type ctype int
+
+const (
+ narrow ctype = iota
+ ambiguous
+ wide
+ halfwidth
+ fullwidth
+ neutral
+)
+
+type intervalType struct {
+ first rune
+ last rune
+ ctype ctype
+}
+
+var ctypes = []intervalType{
+ {0x0020, 0x007E, narrow},
+ {0x00A1, 0x00A1, ambiguous},
+ {0x00A2, 0x00A3, narrow},
+ {0x00A4, 0x00A4, ambiguous},
+ {0x00A5, 0x00A6, narrow},
+ {0x00A7, 0x00A8, ambiguous},
+ {0x00AA, 0x00AA, ambiguous},
+ {0x00AC, 0x00AC, narrow},
+ {0x00AD, 0x00AE, ambiguous},
+ {0x00AF, 0x00AF, narrow},
+ {0x00B0, 0x00B4, ambiguous},
+ {0x00B6, 0x00BA, ambiguous},
+ {0x00BC, 0x00BF, ambiguous},
+ {0x00C6, 0x00C6, ambiguous},
+ {0x00D0, 0x00D0, ambiguous},
+ {0x00D7, 0x00D8, ambiguous},
+ {0x00DE, 0x00E1, ambiguous},
+ {0x00E6, 0x00E6, ambiguous},
+ {0x00E8, 0x00EA, ambiguous},
+ {0x00EC, 0x00ED, ambiguous},
+ {0x00F0, 0x00F0, ambiguous},
+ {0x00F2, 0x00F3, ambiguous},
+ {0x00F7, 0x00FA, ambiguous},
+ {0x00FC, 0x00FC, ambiguous},
+ {0x00FE, 0x00FE, ambiguous},
+ {0x0101, 0x0101, ambiguous},
+ {0x0111, 0x0111, ambiguous},
+ {0x0113, 0x0113, ambiguous},
+ {0x011B, 0x011B, ambiguous},
+ {0x0126, 0x0127, ambiguous},
+ {0x012B, 0x012B, ambiguous},
+ {0x0131, 0x0133, ambiguous},
+ {0x0138, 0x0138, ambiguous},
+ {0x013F, 0x0142, ambiguous},
+ {0x0144, 0x0144, ambiguous},
+ {0x0148, 0x014B, ambiguous},
+ {0x014D, 0x014D, ambiguous},
+ {0x0152, 0x0153, ambiguous},
+ {0x0166, 0x0167, ambiguous},
+ {0x016B, 0x016B, ambiguous},
+ {0x01CE, 0x01CE, ambiguous},
+ {0x01D0, 0x01D0, ambiguous},
+ {0x01D2, 0x01D2, ambiguous},
+ {0x01D4, 0x01D4, ambiguous},
+ {0x01D6, 0x01D6, ambiguous},
+ {0x01D8, 0x01D8, ambiguous},
+ {0x01DA, 0x01DA, ambiguous},
+ {0x01DC, 0x01DC, ambiguous},
+ {0x0251, 0x0251, ambiguous},
+ {0x0261, 0x0261, ambiguous},
+ {0x02C4, 0x02C4, ambiguous},
+ {0x02C7, 0x02C7, ambiguous},
+ {0x02C9, 0x02CB, ambiguous},
+ {0x02CD, 0x02CD, ambiguous},
+ {0x02D0, 0x02D0, ambiguous},
+ {0x02D8, 0x02DB, ambiguous},
+ {0x02DD, 0x02DD, ambiguous},
+ {0x02DF, 0x02DF, ambiguous},
+ {0x0300, 0x036F, ambiguous},
+ {0x0391, 0x03A2, ambiguous},
+ {0x03A3, 0x03A9, ambiguous},
+ {0x03B1, 0x03C1, ambiguous},
+ {0x03C3, 0x03C9, ambiguous},
+ {0x0401, 0x0401, ambiguous},
+ {0x0410, 0x044F, ambiguous},
+ {0x0451, 0x0451, ambiguous},
+ {0x1100, 0x115F, wide},
+ {0x2010, 0x2010, ambiguous},
+ {0x2013, 0x2016, ambiguous},
+ {0x2018, 0x2019, ambiguous},
+ {0x201C, 0x201D, ambiguous},
+ {0x2020, 0x2022, ambiguous},
+ {0x2024, 0x2027, ambiguous},
+ {0x2030, 0x2030, ambiguous},
+ {0x2032, 0x2033, ambiguous},
+ {0x2035, 0x2035, ambiguous},
+ {0x203B, 0x203B, ambiguous},
+ {0x203E, 0x203E, ambiguous},
+ {0x2074, 0x2074, ambiguous},
+ {0x207F, 0x207F, ambiguous},
+ {0x2081, 0x2084, ambiguous},
+ {0x20A9, 0x20A9, halfwidth},
+ {0x20AC, 0x20AC, ambiguous},
+ {0x2103, 0x2103, ambiguous},
+ {0x2105, 0x2105, ambiguous},
+ {0x2109, 0x2109, ambiguous},
+ {0x2113, 0x2113, ambiguous},
+ {0x2116, 0x2116, ambiguous},
+ {0x2121, 0x2122, ambiguous},
+ {0x2126, 0x2126, ambiguous},
+ {0x212B, 0x212B, ambiguous},
+ {0x2153, 0x2154, ambiguous},
+ {0x215B, 0x215E, ambiguous},
+ {0x2160, 0x216B, ambiguous},
+ {0x2170, 0x2179, ambiguous},
+ {0x2189, 0x218A, ambiguous},
+ {0x2190, 0x2199, ambiguous},
+ {0x21B8, 0x21B9, ambiguous},
+ {0x21D2, 0x21D2, ambiguous},
+ {0x21D4, 0x21D4, ambiguous},
+ {0x21E7, 0x21E7, ambiguous},
+ {0x2200, 0x2200, ambiguous},
+ {0x2202, 0x2203, ambiguous},
+ {0x2207, 0x2208, ambiguous},
+ {0x220B, 0x220B, ambiguous},
+ {0x220F, 0x220F, ambiguous},
+ {0x2211, 0x2211, ambiguous},
+ {0x2215, 0x2215, ambiguous},
+ {0x221A, 0x221A, ambiguous},
+ {0x221D, 0x2220, ambiguous},
+ {0x2223, 0x2223, ambiguous},
+ {0x2225, 0x2225, ambiguous},
+ {0x2227, 0x222C, ambiguous},
+ {0x222E, 0x222E, ambiguous},
+ {0x2234, 0x2237, ambiguous},
+ {0x223C, 0x223D, ambiguous},
+ {0x2248, 0x2248, ambiguous},
+ {0x224C, 0x224C, ambiguous},
+ {0x2252, 0x2252, ambiguous},
+ {0x2260, 0x2261, ambiguous},
+ {0x2264, 0x2267, ambiguous},
+ {0x226A, 0x226B, ambiguous},
+ {0x226E, 0x226F, ambiguous},
+ {0x2282, 0x2283, ambiguous},
+ {0x2286, 0x2287, ambiguous},
+ {0x2295, 0x2295, ambiguous},
+ {0x2299, 0x2299, ambiguous},
+ {0x22A5, 0x22A5, ambiguous},
+ {0x22BF, 0x22BF, ambiguous},
+ {0x2312, 0x2312, ambiguous},
+ {0x2329, 0x232A, wide},
+ {0x2460, 0x24E9, ambiguous},
+ {0x24EB, 0x254B, ambiguous},
+ {0x2550, 0x2573, ambiguous},
+ {0x2580, 0x258F, ambiguous},
+ {0x2592, 0x2595, ambiguous},
+ {0x25A0, 0x25A1, ambiguous},
+ {0x25A3, 0x25A9, ambiguous},
+ {0x25B2, 0x25B3, ambiguous},
+ {0x25B6, 0x25B7, ambiguous},
+ {0x25BC, 0x25BD, ambiguous},
+ {0x25C0, 0x25C1, ambiguous},
+ {0x25C6, 0x25C8, ambiguous},
+ {0x25CB, 0x25CB, ambiguous},
+ {0x25CE, 0x25D1, ambiguous},
+ {0x25E2, 0x25E5, ambiguous},
+ {0x25EF, 0x25EF, ambiguous},
+ {0x2605, 0x2606, ambiguous},
+ {0x2609, 0x2609, ambiguous},
+ {0x260E, 0x260F, ambiguous},
+ {0x2614, 0x2615, ambiguous},
+ {0x261C, 0x261C, ambiguous},
+ {0x261E, 0x261E, ambiguous},
+ {0x2640, 0x2640, ambiguous},
+ {0x2642, 0x2642, ambiguous},
+ {0x2660, 0x2661, ambiguous},
+ {0x2663, 0x2665, ambiguous},
+ {0x2667, 0x266A, ambiguous},
+ {0x266C, 0x266D, ambiguous},
+ {0x266F, 0x266F, ambiguous},
+ {0x269E, 0x269F, ambiguous},
+ {0x26BE, 0x26BF, ambiguous},
+ {0x26C4, 0x26CD, ambiguous},
+ {0x26CF, 0x26E1, ambiguous},
+ {0x26E3, 0x26E3, ambiguous},
+ {0x26E8, 0x26FF, ambiguous},
+ {0x273D, 0x273D, ambiguous},
+ {0x2757, 0x2757, ambiguous},
+ {0x2776, 0x277F, ambiguous},
+ {0x27E6, 0x27ED, narrow},
+ {0x2985, 0x2986, narrow},
+ {0x2B55, 0x2B59, ambiguous},
+ {0x2E80, 0x2E9A, wide},
+ {0x2E9B, 0x2EF4, wide},
+ {0x2F00, 0x2FD6, wide},
+ {0x2FF0, 0x2FFC, wide},
+ {0x3000, 0x3000, fullwidth},
+ {0x3001, 0x303E, wide},
+ {0x3041, 0x3097, wide},
+ {0x3099, 0x3100, wide},
+ {0x3105, 0x312E, wide},
+ {0x3131, 0x318F, wide},
+ {0x3190, 0x31BB, wide},
+ {0x31C0, 0x31E4, wide},
+ {0x31F0, 0x321F, wide},
+ {0x3220, 0x3247, wide},
+ {0x3248, 0x324F, ambiguous},
+ {0x3250, 0x32FF, wide},
+ {0x3300, 0x4DBF, wide},
+ {0x4E00, 0xA48D, wide},
+ {0xA490, 0xA4C7, wide},
+ {0xA960, 0xA97D, wide},
+ {0xAC00, 0xD7A4, wide},
+ {0xE000, 0xF8FF, ambiguous},
+ {0xF900, 0xFAFF, wide},
+ {0xFE00, 0xFE0F, ambiguous},
+ {0xFE10, 0xFE1A, wide},
+ {0xFE30, 0xFE53, wide},
+ {0xFE54, 0xFE67, wide},
+ {0xFE68, 0xFE6C, wide},
+ {0xFF01, 0xFF60, fullwidth},
+ {0xFF61, 0xFFBF, halfwidth},
+ {0xFFC2, 0xFFC8, halfwidth},
+ {0xFFCA, 0xFFD0, halfwidth},
+ {0xFFD2, 0xFFD8, halfwidth},
+ {0xFFDA, 0xFFDD, halfwidth},
+ {0xFFE0, 0xFFE7, fullwidth},
+ {0xFFE8, 0xFFEF, halfwidth},
+ {0xFFFD, 0xFFFE, ambiguous},
+ {0x1B000, 0x1B002, wide},
+ {0x1F100, 0x1F10A, ambiguous},
+ {0x1F110, 0x1F12D, ambiguous},
+ {0x1F130, 0x1F169, ambiguous},
+ {0x1F170, 0x1F19B, ambiguous},
+ {0x1F200, 0x1F203, wide},
+ {0x1F210, 0x1F23B, wide},
+ {0x1F240, 0x1F249, wide},
+ {0x1F250, 0x1F252, wide},
+ {0x20000, 0x2FFFE, wide},
+ {0x30000, 0x3FFFE, wide},
+ {0xE0100, 0xE01F0, ambiguous},
+ {0xF0000, 0xFFFFD, ambiguous},
+ {0x100000, 0x10FFFE, ambiguous},
+}
+
+// Condition holds the settings that influence width calculation.
+type Condition struct {
+ EastAsianWidth bool
+}
+
+// NewCondition returns a Condition initialized with the locale-derived default.
+func NewCondition() *Condition {
+ return &Condition{EastAsianWidth}
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+ if r == 0 {
+ return 0
+ }
+ if r < 32 || (r >= 0x7f && r < 0xa0) {
+ return 1
+ }
+ for _, iv := range combining {
+ if iv.first <= r && r <= iv.last {
+ return 0
+ }
+ }
+
+ if c.EastAsianWidth && IsAmbiguousWidth(r) {
+ return 2
+ }
+
+ if r >= 0x1100 &&
+ (r <= 0x115f || r == 0x2329 || r == 0x232a ||
+ (r >= 0x2e80 && r <= 0xa4cf && r != 0x303f) ||
+ (r >= 0xac00 && r <= 0xd7a3) ||
+ (r >= 0xf900 && r <= 0xfaff) ||
+ (r >= 0xfe30 && r <= 0xfe6f) ||
+ (r >= 0xff00 && r <= 0xff60) ||
+ (r >= 0xffe0 && r <= 0xffe6) ||
+ (r >= 0x20000 && r <= 0x2fffd) ||
+ (r >= 0x30000 && r <= 0x3fffd)) {
+ return 2
+ }
+ return 1
+}
+
+func (c *Condition) StringWidth(s string) (width int) {
+ for _, r := range []rune(s) {
+ width += c.RuneWidth(r)
+ }
+ return width
+}
+
+func (c *Condition) Truncate(s string, w int, tail string) string {
+ if c.StringWidth(s) <= w {
+ return s
+ }
+ r := []rune(s)
+ tw := c.StringWidth(tail)
+ w -= tw
+ width := 0
+ i := 0
+ for ; i < len(r); i++ {
+ cw := c.RuneWidth(r[i])
+ if width+cw > w {
+ break
+ }
+ width += cw
+ }
+ return string(r[0:i]) + tail
+}
+
+func (c *Condition) Wrap(s string, w int) string {
+ width := 0
+ out := ""
+ for _, r := range []rune(s) {
+ cw := c.RuneWidth(r)
+ if r == '\n' {
+ out += string(r)
+ width = 0
+ continue
+ } else if width+cw > w {
+ out += "\n"
+ width = 0
+ out += string(r)
+ width += cw
+ continue
+ }
+ out += string(r)
+ width += cw
+ }
+ return out
+}
+
+func (c *Condition) FillLeft(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b) + s
+ }
+ return s
+}
+
+func (c *Condition) FillRight(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return s + string(b)
+ }
+ return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+ return DefaultCondition.RuneWidth(r)
+}
+
+func ct(r rune) ctype {
+ for _, iv := range ctypes {
+ if iv.first <= r && r <= iv.last {
+ return iv.ctype
+ }
+ }
+ return neutral
+}
+
+// IsAmbiguousWidth returns whether r has ambiguous width.
+func IsAmbiguousWidth(r rune) bool {
+ return ct(r) == ambiguous
+}
+
+// IsNeutralWidth returns whether r has neutral width.
+func IsNeutralWidth(r rune) bool {
+ return ct(r) == neutral
+}
+
+// StringWidth returns the number of cells s occupies using the default condition.
+func StringWidth(s string) (width int) {
+ return DefaultCondition.StringWidth(s)
+}
+
+// Truncate returns s unchanged if it fits in w cells; otherwise it shortens s and appends tail.
+func Truncate(s string, w int, tail string) string {
+ return DefaultCondition.Truncate(s, w, tail)
+}
+
+// Wrap hard-wraps s into lines of at most w cells.
+func Wrap(s string, w int) string {
+ return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft left-pads s with spaces to a width of w cells.
+func FillLeft(s string, w int) string {
+ return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight right-pads s with spaces to a width of w cells.
+func FillRight(s string, w int) string {
+ return DefaultCondition.FillRight(s, w)
+}
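The Condition type lets a caller pin the ambiguous-width policy instead of relying on the locale-derived DefaultCondition; a small sketch:

```go
package main

import (
	"fmt"

	runewidth "github.com/mattn/go-runewidth"
)

func main() {
	narrow := &runewidth.Condition{EastAsianWidth: false}
	wide := &runewidth.Condition{EastAsianWidth: true}

	// U+00A1 (inverted exclamation mark) has ambiguous East Asian width:
	// one cell by default, two cells under an East Asian locale.
	fmt.Println(narrow.RuneWidth('¡')) // 1
	fmt.Println(wide.RuneWidth('¡'))   // 2

	fmt.Println(runewidth.IsAmbiguousWidth('¡')) // true
}
```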
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 000000000..0ce32c5e7
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,8 @@
+// +build js
+
+package runewidth
+
+// IsEastAsian reports whether the environment is East Asian; not yet implemented for js.
+func IsEastAsian() bool {
+ // TODO: Implement this for the web. Detect East Asian locales in a compatible way, and return true.
+ return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 000000000..a4495909d
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,69 @@
+// +build !windows,!js
+
+package runewidth
+
+import (
+ "os"
+ "regexp"
+ "strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+// IsEastAsian reports whether the locale (LC_CTYPE, falling back to LANG) is East Asian.
+func IsEastAsian() bool {
+ locale := os.Getenv("LC_CTYPE")
+ if locale == "" {
+ locale = os.Getenv("LANG")
+ }
+
+ // ignore C locale
+ if locale == "POSIX" || locale == "C" {
+ return false
+ }
+ if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+ return false
+ }
+
+ charset := strings.ToLower(locale)
+ r := reLoc.FindStringSubmatch(locale)
+ if len(r) == 2 {
+ charset = strings.ToLower(r[1])
+ }
+
+ if strings.HasSuffix(charset, "@cjk_narrow") {
+ return false
+ }
+
+ for pos, b := range []byte(charset) {
+ if b == '@' {
+ charset = charset[:pos]
+ break
+ }
+ }
+
+ mbcMax := 1
+ switch charset {
+ case "utf-8", "utf8":
+ mbcMax = 6
+ case "jis":
+ mbcMax = 8
+ case "eucjp":
+ mbcMax = 3
+ case "euckr", "euccn":
+ mbcMax = 2
+ case "sjis", "cp932", "cp51932", "cp936", "cp949", "cp950":
+ mbcMax = 2
+ case "big5":
+ mbcMax = 2
+ case "gbk", "gb2312":
+ mbcMax = 2
+ }
+
+ if mbcMax > 1 && (charset[0] != 'u' ||
+ strings.HasPrefix(locale, "ja") ||
+ strings.HasPrefix(locale, "ko") ||
+ strings.HasPrefix(locale, "zh")) {
+ return true
+ }
+ return false
+}
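How the locale parsing above plays out in practice, assuming the POSIX build of IsEastAsian (it reads LC_CTYPE, falling back to LANG):

```go
package main

import (
	"fmt"
	"os"

	runewidth "github.com/mattn/go-runewidth"
)

func main() {
	// "ja_JP.UTF-8": the regexp extracts charset "UTF-8", and the "ja"
	// prefix marks the locale East Asian despite the UTF-8 charset.
	os.Setenv("LC_CTYPE", "ja_JP.UTF-8")
	fmt.Println(runewidth.IsEastAsian()) // true

	// The plain C locale is never treated as East Asian.
	os.Setenv("LC_CTYPE", "C")
	fmt.Println(runewidth.IsEastAsian()) // false
}
```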
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
new file mode 100644
index 000000000..bdd84454b
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -0,0 +1,24 @@
+package runewidth
+
+import (
+ "syscall"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32")
+ procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+// IsEastAsian reports whether the console output code page is an East Asian one.
+func IsEastAsian() bool {
+ r1, _, _ := procGetConsoleOutputCP.Call()
+ if r1 == 0 {
+ return false
+ }
+
+ switch int(r1) {
+ case 932, 51932, 936, 949, 950:
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 000000000..13f15dfce
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2013 Matt T. Proud
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md
new file mode 100644
index 000000000..751ee6967
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md
@@ -0,0 +1,20 @@
+# Overview
+This repository provides various Protocol Buffer extensions for the Go
+language (golang), namely support for record length-delimited message
+streaming.
+
+| Java | Go |
+| ------------------------------ | --------------------- |
+| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited |
+| MessageLite#writeDelimitedTo | pbutil.WriteDelimited |
+
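+An illustrative round trip (a sketch with imports elided; `pb.Record` stands
+in for a generated message type, which this library does not provide):
+
+```go
+var buf bytes.Buffer
+
+// Write one length-delimited record, then read it back.
+if _, err := pbutil.WriteDelimited(&buf, &pb.Record{Name: "a"}); err != nil {
+	log.Fatal(err)
+}
+msg := &pb.Record{}
+if _, err := pbutil.ReadDelimited(&buf, msg); err != nil {
+	log.Fatal(err)
+}
+```
+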
+Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is
+destined to never be merged into mainline (i.e., never be promoted to formal
+[goprotobuf features](https://github.com/golang/protobuf)), this repository
+will live here in the wild.
+
+# Documentation
+We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here.
+
+# Testing
+[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 000000000..66d9b5458
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as a 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ headerBuf := make([]byte, binary.MaxVarintLen32)
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 000000000..c318385cb
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 000000000..4b76ea9a1
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, binary.MaxVarintLen32)
+ encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/LICENSE b/vendor/github.com/mistifyio/go-zfs/LICENSE
new file mode 100644
index 000000000..f4c265cfe
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (c) 2014, OmniTI Computer Consulting, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. \ No newline at end of file
diff --git a/vendor/github.com/mistifyio/go-zfs/README.md b/vendor/github.com/mistifyio/go-zfs/README.md
new file mode 100644
index 000000000..2515e588e
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/README.md
@@ -0,0 +1,54 @@
+# Go Wrapper for ZFS #
+
+Simple wrappers for ZFS command line tools.
+
+[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs)
+
+## Requirements ##
+
+You need a working ZFS setup. To use on Ubuntu 14.04, set up ZFS:
+
+ sudo apt-get install python-software-properties
+ sudo apt-add-repository ppa:zfs-native/stable
+ sudo apt-get update
+ sudo apt-get install ubuntu-zfs libzfs-dev
+
+Developed using Go 1.3, but currently there isn't anything 1.3-specific. Don't use the Ubuntu packages for Go; install from http://golang.org/doc/install instead.
+
+Generally you need root privileges to use anything ZFS-related.
+
+## Status ##
+
+This has only been tested on Ubuntu 14.04.
+
+In the future, we hope to work directly with libzfs.
+
+# Hacking #
+
+The tests have decent examples for most functions.
+
+```go
+// assuming a zpool named test
+// error handling omitted; ok(t, err) is a test helper that fails on a
+// non-nil error
+
+f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ok(t, err)
+
+s, err := f.Snapshot("test", false)
+ok(t, err)
+
+// snapshot is named "test/snapshot-test@test"
+
+c, err := s.Clone("test/clone-test", nil)
+ok(t, err)
+
+err = c.Destroy(zfs.DestroyDefault)
+err = s.Destroy(zfs.DestroyDefault)
+err = f.Destroy(zfs.DestroyDefault)
+```
+
+# Contributing #
+
+See the [contributing guidelines](./CONTRIBUTING.md).
+
diff --git a/vendor/github.com/mistifyio/go-zfs/error.go b/vendor/github.com/mistifyio/go-zfs/error.go
new file mode 100644
index 000000000..5408ccdb5
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/error.go
@@ -0,0 +1,18 @@
+package zfs
+
+import (
+ "fmt"
+)
+
+// Error is an error which is returned when the `zfs` or `zpool` shell
+// commands return with a non-zero exit code.
+type Error struct {
+ Err error
+ Debug string
+ Stderr string
+}
+
+// Error returns the string representation of an Error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr)
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils.go b/vendor/github.com/mistifyio/go-zfs/utils.go
new file mode 100644
index 000000000..d5b735349
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/utils.go
@@ -0,0 +1,320 @@
+package zfs
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type command struct {
+ Command string
+ Stdin io.Reader
+ Stdout io.Writer
+}
+
+func (c *command) Run(arg ...string) ([][]string, error) {
+
+ cmd := exec.Command(c.Command, arg...)
+
+ var stdout, stderr bytes.Buffer
+
+ if c.Stdout == nil {
+ cmd.Stdout = &stdout
+ } else {
+ cmd.Stdout = c.Stdout
+ }
+
+	if c.Stdin != nil {
+		cmd.Stdin = c.Stdin
+	}
+ cmd.Stderr = &stderr
+
+ debug := strings.Join([]string{cmd.Path, strings.Join(cmd.Args, " ")}, " ")
+ if logger != nil {
+ logger.Log(cmd.Args)
+ }
+ err := cmd.Run()
+
+ if err != nil {
+ return nil, &Error{
+ Err: err,
+ Debug: debug,
+ Stderr: stderr.String(),
+ }
+ }
+
+ // assume if you passed in something for stdout, that you know what to do with it
+ if c.Stdout != nil {
+ return nil, nil
+ }
+
+ lines := strings.Split(stdout.String(), "\n")
+
+ //last line is always blank
+ lines = lines[0 : len(lines)-1]
+ output := make([][]string, len(lines))
+
+ for i, l := range lines {
+ output[i] = strings.Fields(l)
+ }
+
+ return output, nil
+}
+
+func setString(field *string, value string) {
+ v := ""
+ if value != "-" {
+ v = value
+ }
+ *field = v
+}
+
+func setUint(field *uint64, value string) error {
+ var v uint64
+ if value != "-" {
+ var err error
+ v, err = strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ *field = v
+ return nil
+}
+
+func (ds *Dataset) parseLine(line []string) error {
+ prop := line[1]
+ val := line[2]
+
+ var err error
+
+ switch prop {
+ case "available":
+ err = setUint(&ds.Avail, val)
+ case "compression":
+ setString(&ds.Compression, val)
+ case "mountpoint":
+ setString(&ds.Mountpoint, val)
+ case "quota":
+ err = setUint(&ds.Quota, val)
+ case "type":
+ setString(&ds.Type, val)
+ case "origin":
+ setString(&ds.Origin, val)
+ case "used":
+ err = setUint(&ds.Used, val)
+ case "volsize":
+ err = setUint(&ds.Volsize, val)
+ case "written":
+ err = setUint(&ds.Written, val)
+ case "logicalused":
+ err = setUint(&ds.Logicalused, val)
+ }
+ return err
+}
+
+/*
+ * from zfs diff's escape function:
+ *
+ * Prints a file name out a character at a time. If the character is
+ * not in the range of what we consider "printable" ASCII, display it
+ * as an escaped 3-digit octal value. ASCII values less than a space
+ * are all control characters and we declare the upper end as the
+ * DELete character. This also is the last 7-bit ASCII character.
+ * We choose to treat all 8-bit ASCII as not printable for this
+ * application.
+ */
+func unescapeFilepath(path string) (string, error) {
+ buf := make([]byte, 0, len(path))
+ llen := len(path)
+ for i := 0; i < llen; {
+ if path[i] == '\\' {
+ if llen < i+4 {
+ return "", fmt.Errorf("Invalid octal code: too short")
+ }
+ octalCode := path[(i + 1):(i + 4)]
+ val, err := strconv.ParseUint(octalCode, 8, 8)
+ if err != nil {
+ return "", fmt.Errorf("Invalid octal code: %v", err)
+ }
+ buf = append(buf, byte(val))
+ i += 4
+ } else {
+ buf = append(buf, path[i])
+ i++
+ }
+ }
+ return string(buf), nil
+}
+
+var changeTypeMap = map[string]ChangeType{
+ "-": Removed,
+ "+": Created,
+ "M": Modified,
+ "R": Renamed,
+}
+var inodeTypeMap = map[string]InodeType{
+ "B": BlockDevice,
+ "C": CharacterDevice,
+ "/": Directory,
+ ">": Door,
+ "|": NamedPipe,
+ "@": SymbolicLink,
+ "P": EventPort,
+ "=": Socket,
+ "F": File,
+}
+
+// matches (+1) or (-1)
+var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)")
+
+func parseReferenceCount(field string) (int, error) {
+ matches := referenceCountRegex.FindStringSubmatch(field)
+ if matches == nil {
+ return 0, fmt.Errorf("Regexp does not match")
+ }
+ return strconv.Atoi(matches[1])
+}
+
+func parseInodeChange(line []string) (*InodeChange, error) {
+ llen := len(line)
+ if llen < 1 {
+ return nil, fmt.Errorf("Empty line passed")
+ }
+
+ changeType := changeTypeMap[line[0]]
+ if changeType == 0 {
+ return nil, fmt.Errorf("Unknown change type '%s'", line[0])
+ }
+
+ switch changeType {
+ case Renamed:
+ if llen != 4 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen)
+ }
+ case Modified:
+ if llen != 4 && llen != 3 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen)
+ }
+ default:
+ if llen != 3 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen)
+ }
+ }
+
+ inodeType := inodeTypeMap[line[1]]
+ if inodeType == 0 {
+ return nil, fmt.Errorf("Unknown inode type '%s'", line[1])
+ }
+
+ path, err := unescapeFilepath(line[2])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ }
+
+ var newPath string
+ var referenceCount int
+ switch changeType {
+ case Renamed:
+ newPath, err = unescapeFilepath(line[3])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ }
+ case Modified:
+ if llen == 4 {
+ referenceCount, err = parseReferenceCount(line[3])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse reference count: %v", err)
+ }
+ }
+ default:
+ newPath = ""
+ }
+
+ return &InodeChange{
+ Change: changeType,
+ Type: inodeType,
+ Path: path,
+ NewPath: newPath,
+ ReferenceCountChange: referenceCount,
+ }, nil
+}
+
+// example input
+//M / /testpool/bar/
+//+ F /testpool/bar/hello.txt
+//M / /testpool/bar/hello.txt (+1)
+//M / /testpool/bar/hello-hardlink
+func parseInodeChanges(lines [][]string) ([]*InodeChange, error) {
+ changes := make([]*InodeChange, len(lines))
+
+ for i, line := range lines {
+ c, err := parseInodeChange(line)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line)
+ }
+ changes[i] = c
+ }
+ return changes, nil
+}
+
+func listByType(t, filter string) ([]*Dataset, error) {
+ args := []string{"get", "-rHp", "-t", t, "all"}
+ if filter != "" {
+ args = append(args, filter)
+ }
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var datasets []*Dataset
+
+ name := ""
+ var ds *Dataset
+ for _, line := range out {
+ if name != line[0] {
+ name = line[0]
+ ds = &Dataset{Name: name}
+ datasets = append(datasets, ds)
+ }
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return datasets, nil
+}
+
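+// propsSlice converts a property map into repeated "-o key=value" arguments;
+// e.g. {"compression": "lz4"} becomes ["-o", "compression=lz4"] (illustrative).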
+func propsSlice(properties map[string]string) []string {
+ args := make([]string, 0, len(properties)*3)
+ for k, v := range properties {
+ args = append(args, "-o")
+ args = append(args, fmt.Sprintf("%s=%s", k, v))
+ }
+ return args
+}
+
+func (z *Zpool) parseLine(line []string) error {
+ prop := line[1]
+ val := line[2]
+
+ var err error
+
+ switch prop {
+ case "health":
+ setString(&z.Health, val)
+ case "allocated":
+ err = setUint(&z.Allocated, val)
+ case "size":
+ err = setUint(&z.Size, val)
+ case "free":
+ err = setUint(&z.Free, val)
+ }
+ return err
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/zfs.go b/vendor/github.com/mistifyio/go-zfs/zfs.go
new file mode 100644
index 000000000..a1d740e07
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/zfs.go
@@ -0,0 +1,382 @@
+// Package zfs provides wrappers around the ZFS command line tools.
+package zfs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// ZFS dataset types, which can indicate if a dataset is a filesystem,
+// snapshot, or volume.
+const (
+ DatasetFilesystem = "filesystem"
+ DatasetSnapshot = "snapshot"
+ DatasetVolume = "volume"
+)
+
+// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot,
+// or volume. The Type struct member can be used to determine a dataset's type.
+//
+// The field definitions can be found in the ZFS manual:
+// http://www.freebsd.org/cgi/man.cgi?zfs(8).
+type Dataset struct {
+ Name string
+ Origin string
+ Used uint64
+ Avail uint64
+ Mountpoint string
+ Compression string
+ Type string
+ Written uint64
+ Volsize uint64
+ Usedbydataset uint64
+ Logicalused uint64
+ Quota uint64
+}
+
+// InodeType is the type of inode as reported by Diff
+type InodeType int
+
+// Types of Inodes
+const (
+ _ = iota // 0 == unknown type
+ BlockDevice InodeType = iota
+ CharacterDevice
+ Directory
+ Door
+ NamedPipe
+ SymbolicLink
+ EventPort
+ Socket
+ File
+)
+
+// ChangeType is the type of inode change as reported by Diff
+type ChangeType int
+
+// Types of Changes
+const (
+ _ = iota // 0 == unknown type
+ Removed ChangeType = iota
+ Created
+ Modified
+ Renamed
+)
+
+// DestroyFlag is the options flag passed to Destroy
+type DestroyFlag int
+
+// Valid destroy options
+const (
+ DestroyDefault DestroyFlag = 1 << iota
+ DestroyRecursive = 1 << iota
+ DestroyRecursiveClones = 1 << iota
+ DestroyDeferDeletion = 1 << iota
+ DestroyForceUmount = 1 << iota
+)
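+
+// Destroy flags are bits and may be combined, e.g. (illustrative):
+//	d.Destroy(DestroyRecursive | DestroyDeferDeletion)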
+
+// InodeChange represents a change as reported by Diff
+type InodeChange struct {
+ Change ChangeType
+ Type InodeType
+ Path string
+ NewPath string
+ ReferenceCountChange int
+}
+
+// Logger can be used to log commands/actions
+type Logger interface {
+ Log(cmd []string)
+}
+
+var logger Logger
+
+// SetLogger sets a log handler that logs all commands, including arguments,
+// before they are executed.
+func SetLogger(l Logger) {
+ logger = l
+}
+
+// zfs is a helper function to wrap typical calls to zfs.
+func zfs(arg ...string) ([][]string, error) {
+ c := command{Command: "zfs"}
+ return c.Run(arg...)
+}
+
+// Datasets returns a slice of ZFS datasets, regardless of type.
+// A filter argument may be passed to select a dataset with the matching name,
+// or an empty string ("") may be used to select all datasets.
+func Datasets(filter string) ([]*Dataset, error) {
+ return listByType("all", filter)
+}
+
+// Snapshots returns a slice of ZFS snapshots.
+// A filter argument may be passed to select a snapshot with the matching name,
+// or an empty string ("") may be used to select all snapshots.
+func Snapshots(filter string) ([]*Dataset, error) {
+ return listByType(DatasetSnapshot, filter)
+}
+
+// Filesystems returns a slice of ZFS filesystems.
+// A filter argument may be passed to select a filesystem with the matching name,
+// or an empty string ("") may be used to select all filesystems.
+func Filesystems(filter string) ([]*Dataset, error) {
+ return listByType(DatasetFilesystem, filter)
+}
+
+// Volumes returns a slice of ZFS volumes.
+// A filter argument may be passed to select a volume with the matching name,
+// or an empty string ("") may be used to select all volumes.
+func Volumes(filter string) ([]*Dataset, error) {
+ return listByType(DatasetVolume, filter)
+}
+
+// GetDataset retrieves a single ZFS dataset by name. This dataset could be
+// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
+func GetDataset(name string) (*Dataset, error) {
+ out, err := zfs("get", "-Hp", "all", name)
+ if err != nil {
+ return nil, err
+ }
+
+ ds := &Dataset{Name: name}
+ for _, line := range out {
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return ds, nil
+}
+
+// Clone clones a ZFS snapshot and returns a clone dataset.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) {
+ if d.Type != DatasetSnapshot {
+ return nil, errors.New("can only clone snapshots")
+ }
+ args := make([]string, 2, 4)
+ args[0] = "clone"
+ args[1] = "-p"
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+ args = append(args, []string{d.Name, dest}...)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(dest)
+}
+
+// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a
+// new snapshot with the specified name, and streams the input data into the
+// newly-created snapshot.
+func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) {
+ c := command{Command: "zfs", Stdin: input}
+ _, err := c.Run("receive", name)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer.
+// An error will be returned if the input dataset is not of snapshot type.
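+// A send stream can be piped back into ReceiveSnapshot (e.g. via io.Pipe) to
+// replicate a snapshot, mirroring `zfs send | zfs receive` (illustrative).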
+func (d *Dataset) SendSnapshot(output io.Writer) error {
+ if d.Type != DatasetSnapshot {
+ return errors.New("can only send snapshots")
+ }
+
+ c := command{Command: "zfs", Stdout: output}
+ _, err := c.Run("send", d.Name)
+ return err
+}
+
+// CreateVolume creates a new ZFS volume with the specified name, size, and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) {
+ args := make([]string, 4, 5)
+ args[0] = "create"
+ args[1] = "-p"
+ args[2] = "-V"
+ args[3] = strconv.FormatUint(size, 10)
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+ args = append(args, name)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// Destroy destroys a ZFS dataset. If the DestroyRecursive flag is set, any
+// descendents of the dataset will be recursively destroyed, including snapshots.
+// If the DestroyDeferDeletion flag is set, the snapshot is marked for deferred
+// deletion.
+func (d *Dataset) Destroy(flags DestroyFlag) error {
+ args := make([]string, 1, 3)
+ args[0] = "destroy"
+ if flags&DestroyRecursive != 0 {
+ args = append(args, "-r")
+ }
+
+ if flags&DestroyRecursiveClones != 0 {
+ args = append(args, "-R")
+ }
+
+ if flags&DestroyDeferDeletion != 0 {
+ args = append(args, "-d")
+ }
+
+ if flags&DestroyForceUmount != 0 {
+ args = append(args, "-f")
+ }
+
+ args = append(args, d.Name)
+ _, err := zfs(args...)
+ return err
+}
+
+// SetProperty sets a ZFS property on the receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) SetProperty(key, val string) error {
+ prop := strings.Join([]string{key, val}, "=")
+ _, err := zfs("set", prop, d.Name)
+ return err
+}
+
+// GetProperty returns the current value of a ZFS property from the
+// receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) GetProperty(key string) (string, error) {
+ out, err := zfs("get", key, d.Name)
+ if err != nil {
+ return "", err
+ }
+
+ return out[0][2], nil
+}
+
+// Snapshots returns a slice of all ZFS snapshots of a given dataset.
+func (d *Dataset) Snapshots() ([]*Dataset, error) {
+ return Snapshots(d.Name)
+}
+
+// CreateFilesystem creates a new ZFS filesystem with the specified name and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
+ args := make([]string, 1, 4)
+ args[0] = "create"
+
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+
+ args = append(args, name)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
+// specified name. Optionally, the snapshot can be taken recursively, creating
+// snapshots of all descendent filesystems in a single, atomic operation.
+func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
+ args := make([]string, 1, 4)
+ args[0] = "snapshot"
+ if recursive {
+ args = append(args, "-r")
+ }
+ snapName := fmt.Sprintf("%s@%s", d.Name, name)
+ args = append(args, snapName)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(snapName)
+}
+
+// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
+// Optionally, intermediate snapshots can be destroyed; if more recent
+// snapshots exist, the rollback cannot be completed without this option.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Rollback(destroyMoreRecent bool) error {
+ if d.Type != DatasetSnapshot {
+ return errors.New("can only rollback snapshots")
+ }
+
+ args := make([]string, 1, 3)
+ args[0] = "rollback"
+ if destroyMoreRecent {
+ args = append(args, "-r")
+ }
+ args = append(args, d.Name)
+
+ _, err := zfs(args...)
+ return err
+}
+
+// Children returns a slice of children of the receiving ZFS dataset.
+// A recursion depth may be specified; a depth of 0 allows unlimited
+// recursion.
+func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
+ args := []string{"get", "-t", "all", "-Hp", "all"}
+ if depth > 0 {
+ args = append(args, "-d")
+ args = append(args, strconv.FormatUint(depth, 10))
+ } else {
+ args = append(args, "-r")
+ }
+ args = append(args, d.Name)
+
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var datasets []*Dataset
+ name := ""
+ var ds *Dataset
+ for _, line := range out {
+ if name != line[0] {
+ name = line[0]
+ ds = &Dataset{Name: name}
+ datasets = append(datasets, ds)
+ }
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+ return datasets[1:], nil
+}
+
+// Diff returns changes between a snapshot and the given ZFS dataset.
+// The snapshot name must include the filesystem part as it is possible to
+// compare clones with their origin snapshots.
+func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) {
+	args := []string{"diff", "-FH", snapshot, d.Name}
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ inodeChanges, err := parseInodeChanges(out)
+ if err != nil {
+ return nil, err
+ }
+ return inodeChanges, nil
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/zpool.go b/vendor/github.com/mistifyio/go-zfs/zpool.go
new file mode 100644
index 000000000..6ba52d30c
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/zpool.go
@@ -0,0 +1,105 @@
+package zfs
+
+// ZFS zpool states, which can indicate if a pool is online, offline,
+// degraded, etc. More information regarding zpool states can be found here:
+// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html.
+const (
+ ZpoolOnline = "ONLINE"
+ ZpoolDegraded = "DEGRADED"
+ ZpoolFaulted = "FAULTED"
+ ZpoolOffline = "OFFLINE"
+ ZpoolUnavail = "UNAVAIL"
+ ZpoolRemoved = "REMOVED"
+)
+
+// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can
+// contain many descendent datasets.
+type Zpool struct {
+ Name string
+ Health string
+ Allocated uint64
+ Size uint64
+ Free uint64
+}
+
+// zpool is a helper function to wrap typical calls to zpool.
+func zpool(arg ...string) ([][]string, error) {
+ c := command{Command: "zpool"}
+ return c.Run(arg...)
+}
+
+// GetZpool retrieves a single ZFS zpool by name.
+func GetZpool(name string) (*Zpool, error) {
+ out, err := zpool("get", "all", "-p", name)
+ if err != nil {
+ return nil, err
+ }
+
+	// the first row is a column header; this zpool get invocation has no -H flag to suppress it, so skip it
+ out = out[1:]
+
+ z := &Zpool{Name: name}
+ for _, line := range out {
+ if err := z.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return z, nil
+}
+
+// Datasets returns a slice of all ZFS datasets in a zpool.
+func (z *Zpool) Datasets() ([]*Dataset, error) {
+ return Datasets(z.Name)
+}
+
+// Snapshots returns a slice of all ZFS snapshots in a zpool.
+func (z *Zpool) Snapshots() ([]*Dataset, error) {
+ return Snapshots(z.Name)
+}
+
+// CreateZpool creates a new ZFS zpool with the specified name, properties,
+// and optional arguments.
+// A full list of available zpool properties and command-line arguments may be
+// found here: https://www.freebsd.org/cgi/man.cgi?zpool(8).
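+// For example (hypothetical device paths):
+//	CreateZpool("test", nil, "mirror", "/dev/sda", "/dev/sdb")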
+func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
+ cli := make([]string, 1, 4)
+ cli[0] = "create"
+ if properties != nil {
+ cli = append(cli, propsSlice(properties)...)
+ }
+ cli = append(cli, name)
+ cli = append(cli, args...)
+ _, err := zpool(cli...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Zpool{Name: name}, nil
+}
+
+// Destroy destroys a ZFS zpool by name.
+func (z *Zpool) Destroy() error {
+ _, err := zpool("destroy", z.Name)
+ return err
+}
+
+// ListZpools lists all ZFS zpools accessible on the current system.
+func ListZpools() ([]*Zpool, error) {
+ args := []string{"list", "-Ho", "name"}
+ out, err := zpool(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var pools []*Zpool
+
+ for _, line := range out {
+ z, err := GetZpool(line[0])
+ if err != nil {
+ return nil, err
+ }
+ pools = append(pools, z)
+ }
+ return pools, nil
+}
diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
new file mode 100644
index 000000000..229851590
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md
new file mode 100644
index 000000000..60ae31170
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/README.md
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is actually very naive: it wraps only at whitespace or an explicit linebreak.
+The goal of this library is word wrapping for CLI output, so the input is
+typically fairly well-controlled human language. Because of this, the naive
+approach typically works just fine.
+
+In the future, we'd like to make the algorithm more advanced. We would do
+so without breaking the API.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
new file mode 100644
index 000000000..ac67205bc
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
@@ -0,0 +1,73 @@
+package wordwrap
+
+import (
+ "bytes"
+ "unicode"
+)
+
+// WrapString wraps the given string within lim width in characters.
+//
+// Wrapping is currently naive and only happens at white-space, so
+// pathological cases, such as a very long word, can reach dramatically past
+// the limit. A future version of the library will implement smarter wrapping.
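+// For example (per the implementation below), a single word longer than lim
+// is emitted unbroken: WrapString("foobarbaz", 3) returns "foobarbaz".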
+func WrapString(s string, lim uint) string {
+	// Initialize a buffer with the input's size as capacity; breaks mostly replace existing whitespace.
+ init := make([]byte, 0, len(s))
+ buf := bytes.NewBuffer(init)
+
+ var current uint
+ var wordBuf, spaceBuf bytes.Buffer
+
+ for _, char := range s {
+ if char == '\n' {
+ if wordBuf.Len() == 0 {
+ if current+uint(spaceBuf.Len()) > lim {
+ current = 0
+ } else {
+ current += uint(spaceBuf.Len())
+ spaceBuf.WriteTo(buf)
+ }
+ spaceBuf.Reset()
+ } else {
+ current += uint(spaceBuf.Len() + wordBuf.Len())
+ spaceBuf.WriteTo(buf)
+ spaceBuf.Reset()
+ wordBuf.WriteTo(buf)
+ wordBuf.Reset()
+ }
+ buf.WriteRune(char)
+ current = 0
+ } else if unicode.IsSpace(char) {
+ if spaceBuf.Len() == 0 || wordBuf.Len() > 0 {
+ current += uint(spaceBuf.Len() + wordBuf.Len())
+ spaceBuf.WriteTo(buf)
+ spaceBuf.Reset()
+ wordBuf.WriteTo(buf)
+ wordBuf.Reset()
+ }
+
+ spaceBuf.WriteRune(char)
+		} else {
+ wordBuf.WriteRune(char)
+
+ if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim {
+ buf.WriteRune('\n')
+ current = 0
+ spaceBuf.Reset()
+ }
+ }
+ }
+
+ if wordBuf.Len() == 0 {
+ if current+uint(spaceBuf.Len()) <= lim {
+ spaceBuf.WriteTo(buf)
+ }
+ } else {
+ spaceBuf.WriteTo(buf)
+ wordBuf.WriteTo(buf)
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/mrunalp/fileutils/LICENSE b/vendor/github.com/mrunalp/fileutils/LICENSE
new file mode 100644
index 000000000..27448585a
--- /dev/null
+++ b/vendor/github.com/mrunalp/fileutils/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/mrunalp/fileutils/README.md b/vendor/github.com/mrunalp/fileutils/README.md
new file mode 100644
index 000000000..6cb4140ea
--- /dev/null
+++ b/vendor/github.com/mrunalp/fileutils/README.md
@@ -0,0 +1,5 @@
+# fileutils
+
+A collection of utilities for file manipulation in Go.
+
+The library is based on Docker's pkg/archive and pkg/idtools, but performs plain file copies instead of handling archive formats.
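
A minimal usage sketch of the package above (the paths are hypothetical; per fileutils.go below, CopyDirectory recursively copies files, symlinks, and device nodes while preserving ownership and permissions):

    package main

    import (
        "log"

        "github.com/mrunalp/fileutils"
    )

    func main() {
        // Recursively copy /tmp/src into /tmp/dst; /tmp/dst is created
        // if it does not already exist.
        if err := fileutils.CopyDirectory("/tmp/src", "/tmp/dst"); err != nil {
            log.Fatal(err)
        }
    }
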
diff --git a/vendor/github.com/mrunalp/fileutils/fileutils.go b/vendor/github.com/mrunalp/fileutils/fileutils.go
new file mode 100644
index 000000000..b60cb909c
--- /dev/null
+++ b/vendor/github.com/mrunalp/fileutils/fileutils.go
@@ -0,0 +1,161 @@
+package fileutils
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// CopyFile copies the file at source to dest
+func CopyFile(source string, dest string) error {
+ si, err := os.Lstat(source)
+ if err != nil {
+ return err
+ }
+
+ st, ok := si.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("could not convert to syscall.Stat_t")
+ }
+
+ uid := int(st.Uid)
+ gid := int(st.Gid)
+
+ // Handle symlinks
+ if si.Mode()&os.ModeSymlink != 0 {
+ target, err := os.Readlink(source)
+ if err != nil {
+ return err
+ }
+ if err := os.Symlink(target, dest); err != nil {
+ return err
+ }
+ }
+
+ // Handle device files
+ if st.Mode&syscall.S_IFMT == syscall.S_IFBLK || st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
+ devMajor := int64(major(uint64(st.Rdev)))
+ devMinor := int64(minor(uint64(st.Rdev)))
+ mode := uint32(si.Mode() & 07777)
+ if st.Mode&syscall.S_IFMT == syscall.S_IFBLK {
+ mode |= syscall.S_IFBLK
+ }
+ if st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
+ mode |= syscall.S_IFCHR
+ }
+ if err := syscall.Mknod(dest, mode, int(mkdev(devMajor, devMinor))); err != nil {
+ return err
+ }
+ }
+
+ // Handle regular files
+ if si.Mode().IsRegular() {
+ sf, err := os.Open(source)
+ if err != nil {
+ return err
+ }
+ defer sf.Close()
+
+ df, err := os.Create(dest)
+ if err != nil {
+ return err
+ }
+ defer df.Close()
+
+ _, err = io.Copy(df, sf)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Chown the file
+ if err := os.Lchown(dest, uid, gid); err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if si.Mode()&os.ModeSymlink == 0 {
+ if err := os.Chmod(dest, si.Mode()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// CopyDirectory copies the files under the source directory
+// to dest directory. The dest directory is created if it
+// does not exist.
+func CopyDirectory(source string, dest string) error {
+ fi, err := os.Stat(source)
+ if err != nil {
+ return err
+ }
+
+ // Get owner.
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("could not convert to syscall.Stat_t")
+ }
+
+ // We have to pick an owner here anyway.
+ if err := MkdirAllNewAs(dest, fi.Mode(), int(st.Uid), int(st.Gid)); err != nil {
+ return err
+ }
+
+ return filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Get the relative path
+ relPath, err := filepath.Rel(source, path)
+ if err != nil {
+ return err
+ }
+
+ if info.IsDir() {
+ // Skip the source directory.
+ if path != source {
+ // Get the owner.
+ st, ok := info.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("could not convert to syscall.Stat_t")
+ }
+
+ uid := int(st.Uid)
+ gid := int(st.Gid)
+
+ if err := os.Mkdir(filepath.Join(dest, relPath), info.Mode()); err != nil {
+ return err
+ }
+
+ if err := os.Lchown(filepath.Join(dest, relPath), uid, gid); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // Copy the file.
+ if err := CopyFile(path, filepath.Join(dest, relPath)); err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
+
+func mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
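
The unexported major, minor, and mkdev helpers above implement the classic Linux dev_t bit layout (12-bit major in bits 8-19, minor split across the low byte and the high bits). Since they are unexported, this round-trip sketch carries its own copy of the same arithmetic rather than importing the package:

    package main

    import "fmt"

    // Standalone copies of the helpers from fileutils.go above.
    func major(device uint64) uint64 { return (device >> 8) & 0xfff }

    func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

    func mkdev(major int64, minor int64) uint32 {
        return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
    }

    func main() {
        // /dev/null is conventionally the character device major 1, minor 3.
        dev := uint64(mkdev(1, 3))
        fmt.Println(major(dev), minor(dev)) // prints: 1 3
    }
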
diff --git a/vendor/github.com/mrunalp/fileutils/idtools.go b/vendor/github.com/mrunalp/fileutils/idtools.go
new file mode 100644
index 000000000..161aec8f5
--- /dev/null
+++ b/vendor/github.com/mrunalp/fileutils/idtools.go
@@ -0,0 +1,49 @@
+package fileutils
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// MkdirAllNewAs creates a directory (including any missing directories along
+// the path) and then changes ownership ONLY of the newly created directories
+// to the requested uid/gid. If the directories along the path already exist,
+// their ownership is left unchanged.
+func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ // Collect the requested path plus all path components leading up to it
+ // that do not yet exist before we MkdirAll, so that we can chown exactly
+ // those directories at the end. Pre-existing directories are never chowned.
+ var paths []string
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ paths = []string{path}
+ } else if err == nil {
+ // nothing to do; directory path fully exists already
+ return nil
+ }
+
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+
+ if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ // chown the requested path and any parent directories that did not
+ // exist before our MkdirAll call
+ for _, pathComponent := range paths {
+ if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
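
A short sketch of MkdirAllNewAs (path, uid, and gid are hypothetical; directories that already exist keep their current owner, and only newly created ones are chowned):

    package main

    import (
        "log"

        "github.com/mrunalp/fileutils"
    )

    func main() {
        // Create /var/lib/example/a/b with mode 0755; only the directories
        // this call actually creates end up owned by 1000:1000.
        if err := fileutils.MkdirAllNewAs("/var/lib/example/a/b", 0755, 1000, 1000); err != nil {
            log.Fatal(err)
        }
    }
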
diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/mtrmac/gpgme/LICENSE
new file mode 100644
index 000000000..06d4ab773
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2015, James Fargher <proglottis@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/mtrmac/gpgme/README.md
new file mode 100644
index 000000000..4770b82a8
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/README.md
@@ -0,0 +1,13 @@
+# GPGME (golang)
+
+Go wrapper for the GPGME library.
+
+This library is intended for use with desktop applications. If you are looking to add OpenPGP support to a server application, I suggest you first look at [golang.org/x/crypto/openpgp](https://godoc.org/golang.org/x/crypto/openpgp).
+
+## Installation
+
+ go get -u github.com/proglottis/gpgme
+
+## Documentation
+
+* [godoc](https://godoc.org/github.com/proglottis/gpgme)
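
For orientation, a minimal decryption sketch against the API vendored below (the import path follows this vendor directory, github.com/mtrmac/gpgme, rather than the upstream path in the README above; the filename is hypothetical, and a working GnuPG setup holding the matching secret key is assumed):

    package main

    import (
        "io"
        "log"
        "os"

        "github.com/mtrmac/gpgme"
    )

    func main() {
        f, err := os.Open("message.gpg") // hypothetical ciphertext file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Decrypt is the top-level helper defined in gpgme.go below; the
        // returned *Data implements io.Reader over the plaintext.
        plain, err := gpgme.Decrypt(f)
        if err != nil {
            log.Fatal(err)
        }
        defer plain.Close()

        if _, err := io.Copy(os.Stdout, plain); err != nil {
            log.Fatal(err)
        }
    }
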
diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/mtrmac/gpgme/callbacks.go
new file mode 100644
index 000000000..d1dc610d4
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/callbacks.go
@@ -0,0 +1,42 @@
+package gpgme
+
+import (
+ "sync"
+)
+
+var callbacks struct {
+ sync.Mutex
+ m map[uintptr]interface{}
+ c uintptr
+}
+
+func callbackAdd(v interface{}) uintptr {
+ callbacks.Lock()
+ defer callbacks.Unlock()
+ if callbacks.m == nil {
+ callbacks.m = make(map[uintptr]interface{})
+ }
+ callbacks.c++
+ ret := callbacks.c
+ callbacks.m[ret] = v
+ return ret
+}
+
+func callbackLookup(c uintptr) interface{} {
+ callbacks.Lock()
+ defer callbacks.Unlock()
+ ret := callbacks.m[c]
+ if ret == nil {
+ panic("callback pointer not found")
+ }
+ return ret
+}
+
+func callbackDelete(c uintptr) {
+ callbacks.Lock()
+ defer callbacks.Unlock()
+ if callbacks.m[c] == nil {
+ panic("callback pointer not found")
+ }
+ delete(callbacks.m, c)
+}
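
callbacks.go above implements the usual cgo workaround: Go pointers must not be handed to C, so the package gives C an opaque integer handle and keeps the real Go value in a mutex-guarded map. A standalone sketch of the same pattern (the names here are illustrative, not the package's API):

    package main

    import (
        "fmt"
        "sync"
    )

    var registry struct {
        sync.Mutex
        m map[uintptr]interface{}
        c uintptr
    }

    func add(v interface{}) uintptr {
        registry.Lock()
        defer registry.Unlock()
        if registry.m == nil {
            registry.m = make(map[uintptr]interface{})
        }
        registry.c++ // never hand out 0, so 0 can mean "no callback set"
        registry.m[registry.c] = v
        return registry.c
    }

    func lookup(h uintptr) interface{} {
        registry.Lock()
        defer registry.Unlock()
        return registry.m[h]
    }

    func main() {
        h := add("some Go state")       // h is what C would receive as a handle
        fmt.Println(lookup(h).(string)) // when C calls back, Go resolves h
    }
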
diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/mtrmac/gpgme/data.go
new file mode 100644
index 000000000..eebc97263
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/data.go
@@ -0,0 +1,191 @@
+package gpgme
+
+// #include <string.h>
+// #include <gpgme.h>
+// #include <errno.h>
+// #include "go_gpgme.h"
+import "C"
+
+import (
+ "io"
+ "os"
+ "runtime"
+ "unsafe"
+)
+
+const (
+ SeekSet = C.SEEK_SET
+ SeekCur = C.SEEK_CUR
+ SeekEnd = C.SEEK_END
+)
+
+//export gogpgme_readfunc
+func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
+ d := callbackLookup(uintptr(handle)).(*Data)
+ if len(d.buf) < int(size) {
+ d.buf = make([]byte, size)
+ }
+ n, err := d.r.Read(d.buf[:size])
+ if err != nil && err != io.EOF {
+ C.gpgme_err_set_errno(C.EIO)
+ return -1
+ }
+ C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n))
+ return C.ssize_t(n)
+}
+
+//export gogpgme_writefunc
+func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
+ d := callbackLookup(uintptr(handle)).(*Data)
+ if len(d.buf) < int(size) {
+ d.buf = make([]byte, size)
+ }
+ C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size))
+ n, err := d.w.Write(d.buf[:size])
+ if err != nil && err != io.EOF {
+ C.gpgme_err_set_errno(C.EIO)
+ return -1
+ }
+ return C.ssize_t(n)
+}
+
+//export gogpgme_seekfunc
+func gogpgme_seekfunc(handle unsafe.Pointer, offset C.off_t, whence C.int) C.off_t {
+ d := callbackLookup(uintptr(handle)).(*Data)
+ n, err := d.s.Seek(int64(offset), int(whence))
+ if err != nil {
+ C.gpgme_err_set_errno(C.EIO)
+ return -1
+ }
+ return C.off_t(n)
+}
+
+// Data is a buffer used to communicate with GPGME
+type Data struct {
+ dh C.gpgme_data_t
+ buf []byte
+ cbs C.struct_gpgme_data_cbs
+ r io.Reader
+ w io.Writer
+ s io.Seeker
+ cbc uintptr
+}
+
+func newData() *Data {
+ d := &Data{}
+ runtime.SetFinalizer(d, (*Data).Close)
+ return d
+}
+
+// NewData returns a new memory-based data buffer
+func NewData() (*Data, error) {
+ d := newData()
+ return d, handleError(C.gpgme_data_new(&d.dh))
+}
+
+// NewDataFile returns a new file-based data buffer
+func NewDataFile(f *os.File) (*Data, error) {
+ d := newData()
+ return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd())))
+}
+
+// NewDataBytes returns a new memory-based data buffer that contains `b` bytes
+func NewDataBytes(b []byte) (*Data, error) {
+ d := newData()
+ var cb *C.char
+ if len(b) != 0 {
+ cb = (*C.char)(unsafe.Pointer(&b[0]))
+ }
+ return d, handleError(C.gpgme_data_new_from_mem(&d.dh, cb, C.size_t(len(b)), 1))
+}
+
+// NewDataReader returns a new callback-based data buffer
+func NewDataReader(r io.Reader) (*Data, error) {
+ d := newData()
+ d.r = r
+ d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
+ cbc := callbackAdd(d)
+ d.cbc = cbc
+ return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+}
+
+// NewDataWriter returns a new callback-based data buffer
+func NewDataWriter(w io.Writer) (*Data, error) {
+ d := newData()
+ d.w = w
+ d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
+ cbc := callbackAdd(d)
+ d.cbc = cbc
+ return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+}
+
+// NewDataReadWriter returns a new callback-based data buffer
+func NewDataReadWriter(rw io.ReadWriter) (*Data, error) {
+ d := newData()
+ d.r = rw
+ d.w = rw
+ d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
+ d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
+ cbc := callbackAdd(d)
+ d.cbc = cbc
+ return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+}
+
+// NewDataReadWriteSeeker returns a new callback-based data buffer
+func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) {
+ d := newData()
+ d.r = rw
+ d.w = rw
+ d.s = rw
+ d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
+ d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
+ d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc)
+ cbc := callbackAdd(d)
+ d.cbc = cbc
+ return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+}
+
+// Close releases any resources associated with the data buffer
+func (d *Data) Close() error {
+ if d.dh == nil {
+ return nil
+ }
+ if d.cbc > 0 {
+ callbackDelete(d.cbc)
+ }
+ _, err := C.gpgme_data_release(d.dh)
+ d.dh = nil
+ return err
+}
+
+func (d *Data) Write(p []byte) (int, error) {
+ if len(p) == 0 {
+ // Avoid taking &p[0] of an empty slice; a zero-length write is a no-op.
+ return 0, nil
+ }
+ n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
+ if err != nil {
+ return 0, err
+ }
+ if n == 0 {
+ return 0, io.EOF
+ }
+ return int(n), nil
+}
+
+func (d *Data) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ // Mirror Write: reading into an empty slice must not dereference p[0].
+ return 0, nil
+ }
+ n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
+ if err != nil {
+ return 0, err
+ }
+ if n == 0 {
+ return 0, io.EOF
+ }
+ return int(n), nil
+}
+
+func (d *Data) Seek(offset int64, whence int) (int64, error) {
+ n, err := C.gpgme_data_seek(d.dh, C.off_t(offset), C.int(whence))
+ return int64(n), err
+}
+
+// Name returns the associated filename, if any
+func (d *Data) Name() string {
+ return C.GoString(C.gpgme_data_get_file_name(d.dh))
+}
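
A sketch wiring the callback-backed buffers above to ordinary Go I/O (assumes an installed gpgme C library, since this is a cgo package; bytes.Buffer stands in for any io.Writer):

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/mtrmac/gpgme"
    )

    func main() {
        var out bytes.Buffer

        // Writes to d are routed by gpgme through gogpgme_writefunc into out.
        d, err := gpgme.NewDataWriter(&out)
        if err != nil {
            log.Fatal(err)
        }
        defer d.Close()

        if _, err := d.Write([]byte("hello")); err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.String()) // prints: hello
    }
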
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/mtrmac/gpgme/go_gpgme.c
new file mode 100644
index 000000000..b887574e0
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/go_gpgme.c
@@ -0,0 +1,89 @@
+#include "go_gpgme.h"
+
+gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) {
+ return gpgme_data_new_from_cbs(dh, cbs, (void *)handle);
+}
+
+void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) {
+ gpgme_set_passphrase_cb(ctx, cb, (void *)handle);
+}
+
+unsigned int key_revoked(gpgme_key_t k) {
+ return k->revoked;
+}
+
+unsigned int key_expired(gpgme_key_t k) {
+ return k->expired;
+}
+
+unsigned int key_disabled(gpgme_key_t k) {
+ return k->disabled;
+}
+
+unsigned int key_invalid(gpgme_key_t k) {
+ return k->invalid;
+}
+
+unsigned int key_can_encrypt(gpgme_key_t k) {
+ return k->can_encrypt;
+}
+
+unsigned int key_can_sign(gpgme_key_t k) {
+ return k->can_sign;
+}
+
+unsigned int key_can_certify(gpgme_key_t k) {
+ return k->can_certify;
+}
+
+unsigned int key_secret(gpgme_key_t k) {
+ return k->secret;
+}
+
+unsigned int key_can_authenticate(gpgme_key_t k) {
+ return k->can_authenticate;
+}
+
+unsigned int key_is_qualified(gpgme_key_t k) {
+ return k->is_qualified;
+}
+
+unsigned int signature_wrong_key_usage(gpgme_signature_t s) {
+ return s->wrong_key_usage;
+}
+
+unsigned int signature_pka_trust(gpgme_signature_t s) {
+ return s->pka_trust;
+}
+
+unsigned int signature_chain_model(gpgme_signature_t s) {
+ return s->chain_model;
+}
+
+unsigned int subkey_revoked(gpgme_subkey_t k) {
+ return k->revoked;
+}
+
+unsigned int subkey_expired(gpgme_subkey_t k) {
+ return k->expired;
+}
+
+unsigned int subkey_disabled(gpgme_subkey_t k) {
+ return k->disabled;
+}
+
+unsigned int subkey_invalid(gpgme_subkey_t k) {
+ return k->invalid;
+}
+
+unsigned int subkey_secret(gpgme_subkey_t k) {
+ return k->secret;
+}
+
+unsigned int uid_revoked(gpgme_user_id_t u) {
+ return u->revoked;
+}
+
+unsigned int uid_invalid(gpgme_user_id_t u) {
+ return u->invalid;
+}
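
These one-line accessors exist because gpgme declares fields such as revoked and expired as C bitfields, which cgo cannot address from Go; each shim widens a bitfield to an unsigned int that Go can call through. A self-contained sketch of the underlying limitation (illustrative struct, assuming a cgo-enabled build):

    package main

    /*
    struct flags {
        unsigned int revoked : 1;
    };
    static struct flags example = { 1 };
    static struct flags *get_example(void) { return &example; }
    static unsigned int flags_revoked(struct flags *f) { return f->revoked; }
    */
    import "C"

    import "fmt"

    func main() {
        f := C.get_example()
        // f.revoked would not compile: cgo omits bitfield members from the
        // generated Go struct, so access must go through a C helper.
        fmt.Println(C.flags_revoked(f)) // prints: 1
    }
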
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/mtrmac/gpgme/go_gpgme.h
new file mode 100644
index 000000000..a3678b127
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/go_gpgme.h
@@ -0,0 +1,37 @@
+#ifndef GO_GPGME_H
+#define GO_GPGME_H
+
+#define _FILE_OFFSET_BITS 64
+#include <stdint.h>
+
+#include <gpgme.h>
+
+extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
+extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
+extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
+extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd);
+extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle);
+extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle);
+
+extern unsigned int key_revoked(gpgme_key_t k);
+extern unsigned int key_expired(gpgme_key_t k);
+extern unsigned int key_disabled(gpgme_key_t k);
+extern unsigned int key_invalid(gpgme_key_t k);
+extern unsigned int key_can_encrypt(gpgme_key_t k);
+extern unsigned int key_can_sign(gpgme_key_t k);
+extern unsigned int key_can_certify(gpgme_key_t k);
+extern unsigned int key_secret(gpgme_key_t k);
+extern unsigned int key_can_authenticate(gpgme_key_t k);
+extern unsigned int key_is_qualified(gpgme_key_t k);
+extern unsigned int signature_wrong_key_usage(gpgme_signature_t s);
+extern unsigned int signature_pka_trust(gpgme_signature_t s);
+extern unsigned int signature_chain_model(gpgme_signature_t s);
+extern unsigned int subkey_revoked(gpgme_subkey_t k);
+extern unsigned int subkey_expired(gpgme_subkey_t k);
+extern unsigned int subkey_disabled(gpgme_subkey_t k);
+extern unsigned int subkey_invalid(gpgme_subkey_t k);
+extern unsigned int subkey_secret(gpgme_subkey_t k);
+extern unsigned int uid_revoked(gpgme_user_id_t u);
+extern unsigned int uid_invalid(gpgme_user_id_t u);
+
+#endif
diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/mtrmac/gpgme/gpgme.go
new file mode 100644
index 000000000..20aad737c
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/gpgme.go
@@ -0,0 +1,748 @@
+// Package gpgme provides a Go wrapper for the GPGME library
+package gpgme
+
+// #cgo LDFLAGS: -lgpgme -lassuan -lgpg-error
+// #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
+// #include <stdlib.h>
+// #include <gpgme.h>
+// #include "go_gpgme.h"
+import "C"
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "time"
+ "unsafe"
+)
+
+var Version string
+
+func init() {
+ Version = C.GoString(C.gpgme_check_version(nil))
+}
+
+// Callback is the function that is called when a passphrase is required
+type Callback func(uidHint string, prevWasBad bool, f *os.File) error
+
+//export gogpgme_passfunc
+func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t {
+ c := callbackLookup(uintptr(hook)).(*Context)
+ go_uid_hint := C.GoString(uid_hint)
+ f := os.NewFile(uintptr(fd), go_uid_hint)
+ defer f.Close()
+ err := c.callback(go_uid_hint, prev_was_bad != 0, f)
+ if err != nil {
+ return C.GPG_ERR_CANCELED
+ }
+ return 0
+}
+
+type Protocol int
+
+const (
+ ProtocolOpenPGP Protocol = C.GPGME_PROTOCOL_OpenPGP
+ ProtocolCMS Protocol = C.GPGME_PROTOCOL_CMS
+ ProtocolGPGConf Protocol = C.GPGME_PROTOCOL_GPGCONF
+ ProtocolAssuan Protocol = C.GPGME_PROTOCOL_ASSUAN
+ ProtocolG13 Protocol = C.GPGME_PROTOCOL_G13
+ ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER
+ // ProtocolSpawn Protocol = C.GPGME_PROTOCOL_SPAWN // Unavailable in 1.4.3
+ ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT
+ ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN
+)
+
+type PinEntryMode int
+
+// const ( // Unavailable in 1.3.2
+// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
+// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
+// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
+// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
+// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
+// )
+
+type EncryptFlag uint
+
+const (
+ EncryptAlwaysTrust EncryptFlag = C.GPGME_ENCRYPT_ALWAYS_TRUST
+ EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO
+ EncryptPrepare EncryptFlag = C.GPGME_ENCRYPT_PREPARE
+ EncryptExceptSign EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN
+ // EncryptNoCompress EncryptFlag = C.GPGME_ENCRYPT_NO_COMPRESS // Unavailable in 1.4.3
+)
+
+type HashAlgo int
+
+// Const values for HashAlgo should be added when necessary.
+
+type KeyListMode uint
+
+const (
+ KeyListModeLocal KeyListMode = C.GPGME_KEYLIST_MODE_LOCAL
+ KeyListModeExtern KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN
+ KeyListModeSigs KeyListMode = C.GPGME_KEYLIST_MODE_SIGS
+ KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS
+ // KeyListModeWithSecret KeyListMode = C.GPGME_KEYLIST_MODE_WITH_SECRET // Unavailable in 1.4.3
+ KeyListModeEphemeral KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL
+ KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE
+)
+
+type PubkeyAlgo int
+
+// Const values for PubkeyAlgo should be added when necessary.
+
+type SigMode int
+
+const (
+ SigModeNormal SigMode = C.GPGME_SIG_MODE_NORMAL
+ SigModeDetach SigMode = C.GPGME_SIG_MODE_DETACH
+ SigModeClear SigMode = C.GPGME_SIG_MODE_CLEAR
+)
+
+type SigSum int
+
+const (
+ SigSumValid SigSum = C.GPGME_SIGSUM_VALID
+ SigSumGreen SigSum = C.GPGME_SIGSUM_GREEN
+ SigSumRed SigSum = C.GPGME_SIGSUM_RED
+ SigSumKeyRevoked SigSum = C.GPGME_SIGSUM_KEY_REVOKED
+ SigSumKeyExpired SigSum = C.GPGME_SIGSUM_KEY_EXPIRED
+ SigSumSigExpired SigSum = C.GPGME_SIGSUM_SIG_EXPIRED
+ SigSumKeyMissing SigSum = C.GPGME_SIGSUM_KEY_MISSING
+ SigSumCRLMissing SigSum = C.GPGME_SIGSUM_CRL_MISSING
+ SigSumCRLTooOld SigSum = C.GPGME_SIGSUM_CRL_TOO_OLD
+ SigSumBadPolicy SigSum = C.GPGME_SIGSUM_BAD_POLICY
+ SigSumSysError SigSum = C.GPGME_SIGSUM_SYS_ERROR
+)
+
+type Validity int
+
+const (
+ ValidityUnknown Validity = C.GPGME_VALIDITY_UNKNOWN
+ ValidityUndefined Validity = C.GPGME_VALIDITY_UNDEFINED
+ ValidityNever Validity = C.GPGME_VALIDITY_NEVER
+ ValidityMarginal Validity = C.GPGME_VALIDITY_MARGINAL
+ ValidityFull Validity = C.GPGME_VALIDITY_FULL
+ ValidityUltimate Validity = C.GPGME_VALIDITY_ULTIMATE
+)
+
+type ErrorCode int
+
+const (
+ ErrorNoError ErrorCode = C.GPG_ERR_NO_ERROR
+ ErrorEOF ErrorCode = C.GPG_ERR_EOF
+)
+
+// Error is a wrapper for GPGME errors
+type Error struct {
+ err C.gpgme_error_t
+}
+
+func (e Error) Code() ErrorCode {
+ return ErrorCode(C.gpgme_err_code(e.err))
+}
+
+func (e Error) Error() string {
+ return C.GoString(C.gpgme_strerror(e.err))
+}
+
+func handleError(err C.gpgme_error_t) error {
+ e := Error{err: err}
+ if e.Code() == ErrorNoError {
+ return nil
+ }
+ return e
+}
+
+func cbool(b bool) C.int {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func EngineCheckVersion(p Protocol) error {
+ return handleError(C.gpgme_engine_check_version(C.gpgme_protocol_t(p)))
+}
+
+type EngineInfo struct {
+ info C.gpgme_engine_info_t
+}
+
+func (e *EngineInfo) Next() *EngineInfo {
+ if e.info.next == nil {
+ return nil
+ }
+ return &EngineInfo{info: e.info.next}
+}
+
+func (e *EngineInfo) Protocol() Protocol {
+ return Protocol(e.info.protocol)
+}
+
+func (e *EngineInfo) FileName() string {
+ return C.GoString(e.info.file_name)
+}
+
+func (e *EngineInfo) Version() string {
+ return C.GoString(e.info.version)
+}
+
+func (e *EngineInfo) RequiredVersion() string {
+ return C.GoString(e.info.req_version)
+}
+
+func (e *EngineInfo) HomeDir() string {
+ return C.GoString(e.info.home_dir)
+}
+
+func GetEngineInfo() (*EngineInfo, error) {
+ info := &EngineInfo{}
+ return info, handleError(C.gpgme_get_engine_info(&info.info))
+}
+
+func SetEngineInfo(proto Protocol, fileName, homeDir string) error {
+ var cfn, chome *C.char
+ if fileName != "" {
+ cfn = C.CString(fileName)
+ defer C.free(unsafe.Pointer(cfn))
+ }
+ if homeDir != "" {
+ chome = C.CString(homeDir)
+ defer C.free(unsafe.Pointer(chome))
+ }
+ return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome))
+}
+
+func FindKeys(pattern string, secretOnly bool) ([]*Key, error) {
+ var keys []*Key
+ ctx, err := New()
+ if err != nil {
+ return keys, err
+ }
+ defer ctx.Release()
+ if err := ctx.KeyListStart(pattern, secretOnly); err != nil {
+ return keys, err
+ }
+ defer ctx.KeyListEnd()
+ for ctx.KeyListNext() {
+ keys = append(keys, ctx.Key)
+ }
+ if ctx.KeyError != nil {
+ return keys, ctx.KeyError
+ }
+ return keys, nil
+}
+
+func Decrypt(r io.Reader) (*Data, error) {
+ ctx, err := New()
+ if err != nil {
+ return nil, err
+ }
+ defer ctx.Release()
+ cipher, err := NewDataReader(r)
+ if err != nil {
+ return nil, err
+ }
+ defer cipher.Close()
+ plain, err := NewData()
+ if err != nil {
+ return nil, err
+ }
+ err = ctx.Decrypt(cipher, plain)
+ plain.Seek(0, SeekSet)
+ return plain, err
+}
+
+type Context struct {
+ Key *Key
+ KeyError error
+
+ callback Callback
+ cbc uintptr
+
+ ctx C.gpgme_ctx_t
+}
+
+func New() (*Context, error) {
+ c := &Context{}
+ err := C.gpgme_new(&c.ctx)
+ runtime.SetFinalizer(c, (*Context).Release)
+ return c, handleError(err)
+}
+
+func (c *Context) Release() {
+ if c.ctx == nil {
+ return
+ }
+ if c.cbc > 0 {
+ callbackDelete(c.cbc)
+ }
+ C.gpgme_release(c.ctx)
+ c.ctx = nil
+}
+
+func (c *Context) SetArmor(yes bool) {
+ C.gpgme_set_armor(c.ctx, cbool(yes))
+}
+
+func (c *Context) Armor() bool {
+ return C.gpgme_get_armor(c.ctx) != 0
+}
+
+func (c *Context) SetTextMode(yes bool) {
+ C.gpgme_set_textmode(c.ctx, cbool(yes))
+}
+
+func (c *Context) TextMode() bool {
+ return C.gpgme_get_textmode(c.ctx) != 0
+}
+
+func (c *Context) SetProtocol(p Protocol) error {
+ return handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p)))
+}
+
+func (c *Context) Protocol() Protocol {
+ return Protocol(C.gpgme_get_protocol(c.ctx))
+}
+
+func (c *Context) SetKeyListMode(m KeyListMode) error {
+ return handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m)))
+}
+
+func (c *Context) KeyListMode() KeyListMode {
+ return KeyListMode(C.gpgme_get_keylist_mode(c.ctx))
+}
+
+// Unavailable in 1.3.2:
+// func (c *Context) SetPinEntryMode(m PinEntryMode) error {
+// return handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
+// }
+
+// Unavailable in 1.3.2:
+// func (c *Context) PinEntryMode() PinEntryMode {
+// return PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
+// }
+
+func (c *Context) SetCallback(callback Callback) error {
+ var err error
+ c.callback = callback
+ if c.cbc > 0 {
+ callbackDelete(c.cbc)
+ }
+ if callback != nil {
+ cbc := callbackAdd(c)
+ c.cbc = cbc
+ _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc))
+ } else {
+ c.cbc = 0
+ _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0)
+ }
+ return err
+}
+
+func (c *Context) EngineInfo() *EngineInfo {
+ return &EngineInfo{info: C.gpgme_ctx_get_engine_info(c.ctx)}
+}
+
+func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error {
+ var cfn, chome *C.char
+ if fileName != "" {
+ cfn = C.CString(fileName)
+ defer C.free(unsafe.Pointer(cfn))
+ }
+ if homeDir != "" {
+ chome = C.CString(homeDir)
+ defer C.free(unsafe.Pointer(chome))
+ }
+ return handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome))
+}
+
+func (c *Context) KeyListStart(pattern string, secretOnly bool) error {
+ cpattern := C.CString(pattern)
+ defer C.free(unsafe.Pointer(cpattern))
+ err := C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly))
+ return handleError(err)
+}
+
+func (c *Context) KeyListNext() bool {
+ c.Key = newKey()
+ err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k))
+ if err != nil {
+ if e, ok := err.(Error); ok && e.Code() == ErrorEOF {
+ c.KeyError = nil
+ } else {
+ c.KeyError = err
+ }
+ return false
+ }
+ c.KeyError = nil
+ return true
+}
+
+func (c *Context) KeyListEnd() error {
+ return handleError(C.gpgme_op_keylist_end(c.ctx))
+}
+
+func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) {
+ key := newKey()
+ cfpr := C.CString(fingerprint)
+ defer C.free(unsafe.Pointer(cfpr))
+ err := handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, cbool(secret)))
+ if e, ok := err.(Error); key.k == nil && ok && e.Code() == ErrorEOF {
+ return nil, fmt.Errorf("key %q not found", fingerprint)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+}
+
+func (c *Context) Decrypt(ciphertext, plaintext *Data) error {
+ return handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh))
+}
+
+func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error {
+ return handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh))
+}
+
+type Signature struct {
+ Summary SigSum
+ Fingerprint string
+ Status error
+ Timestamp time.Time
+ ExpTimestamp time.Time
+ WrongKeyUsage bool
+ PKATrust uint
+ ChainModel bool
+ Validity Validity
+ ValidityReason error
+ PubkeyAlgo PubkeyAlgo
+ HashAlgo HashAlgo
+}
+
+func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, error) {
+ var signedTextPtr, plainPtr C.gpgme_data_t = nil, nil
+ if signedText != nil {
+ signedTextPtr = signedText.dh
+ }
+ if plain != nil {
+ plainPtr = plain.dh
+ }
+ err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr))
+ if err != nil {
+ return "", nil, err
+ }
+ res := C.gpgme_op_verify_result(c.ctx)
+ sigs := []Signature{}
+ for s := res.signatures; s != nil; s = s.next {
+ sig := Signature{
+ Summary: SigSum(s.summary),
+ Fingerprint: C.GoString(s.fpr),
+ Status: handleError(s.status),
+ // s.notations not implemented
+ Timestamp: time.Unix(int64(s.timestamp), 0),
+ ExpTimestamp: time.Unix(int64(s.exp_timestamp), 0),
+ WrongKeyUsage: C.signature_wrong_key_usage(s) != 0,
+ PKATrust: uint(C.signature_pka_trust(s)),
+ ChainModel: C.signature_chain_model(s) != 0,
+ Validity: Validity(s.validity),
+ ValidityReason: handleError(s.validity_reason),
+ PubkeyAlgo: PubkeyAlgo(s.pubkey_algo),
+ HashAlgo: HashAlgo(s.hash_algo),
+ }
+ sigs = append(sigs, sig)
+ }
+ return C.GoString(res.file_name), sigs, nil
+}
+
+func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error {
+ size := unsafe.Sizeof(new(C.gpgme_key_t))
+ recp := C.calloc(C.size_t(len(recipients)+1), C.size_t(size))
+ defer C.free(recp)
+ for i := range recipients {
+ ptr := (*C.gpgme_key_t)(unsafe.Pointer(uintptr(recp) + size*uintptr(i)))
+ *ptr = recipients[i].k
+ }
+ err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh)
+ return handleError(err)
+}
+
+func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error {
+ C.gpgme_signers_clear(c.ctx)
+ for _, k := range signers {
+ if err := handleError(C.gpgme_signers_add(c.ctx, k.k)); err != nil {
+ C.gpgme_signers_clear(c.ctx)
+ return err
+ }
+ }
+ return handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode)))
+}
+
+// ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned".
+type ImportStatusFlags uint
+
+const (
+ ImportNew ImportStatusFlags = C.GPGME_IMPORT_NEW
+ ImportUID ImportStatusFlags = C.GPGME_IMPORT_UID
+ ImportSIG ImportStatusFlags = C.GPGME_IMPORT_SIG
+ ImportSubKey ImportStatusFlags = C.GPGME_IMPORT_SUBKEY
+ ImportSecret ImportStatusFlags = C.GPGME_IMPORT_SECRET
+)
+
+type ImportStatus struct {
+ Fingerprint string
+ Result error
+ Status ImportStatusFlags
+}
+
+type ImportResult struct {
+ Considered int
+ NoUserID int
+ Imported int
+ ImportedRSA int
+ Unchanged int
+ NewUserIDs int
+ NewSubKeys int
+ NewSignatures int
+ NewRevocations int
+ SecretRead int
+ SecretImported int
+ SecretUnchanged int
+ NotImported int
+ Imports []ImportStatus
+}
+
+func (c *Context) Import(keyData *Data) (*ImportResult, error) {
+ err := handleError(C.gpgme_op_import(c.ctx, keyData.dh))
+ if err != nil {
+ return nil, err
+ }
+ res := C.gpgme_op_import_result(c.ctx)
+ imports := []ImportStatus{}
+ for s := res.imports; s != nil; s = s.next {
+ imports = append(imports, ImportStatus{
+ Fingerprint: C.GoString(s.fpr),
+ Result: handleError(s.result),
+ Status: ImportStatusFlags(s.status),
+ })
+ }
+ return &ImportResult{
+ Considered: int(res.considered),
+ NoUserID: int(res.no_user_id),
+ Imported: int(res.imported),
+ ImportedRSA: int(res.imported_rsa),
+ Unchanged: int(res.unchanged),
+ NewUserIDs: int(res.new_user_ids),
+ NewSubKeys: int(res.new_sub_keys),
+ NewSignatures: int(res.new_signatures),
+ NewRevocations: int(res.new_revocations),
+ SecretRead: int(res.secret_read),
+ SecretImported: int(res.secret_imported),
+ SecretUnchanged: int(res.secret_unchanged),
+ NotImported: int(res.not_imported),
+ Imports: imports,
+ }, nil
+}
+
+type Key struct {
+ k C.gpgme_key_t
+}
+
+func newKey() *Key {
+ k := &Key{}
+ runtime.SetFinalizer(k, (*Key).Release)
+ return k
+}
+
+func (k *Key) Release() {
+ C.gpgme_key_release(k.k)
+ k.k = nil
+}
+
+func (k *Key) Revoked() bool {
+ return C.key_revoked(k.k) != 0
+}
+
+func (k *Key) Expired() bool {
+ return C.key_expired(k.k) != 0
+}
+
+func (k *Key) Disabled() bool {
+ return C.key_disabled(k.k) != 0
+}
+
+func (k *Key) Invalid() bool {
+ return C.key_invalid(k.k) != 0
+}
+
+func (k *Key) CanEncrypt() bool {
+ return C.key_can_encrypt(k.k) != 0
+}
+
+func (k *Key) CanSign() bool {
+ return C.key_can_sign(k.k) != 0
+}
+
+func (k *Key) CanCertify() bool {
+ return C.key_can_certify(k.k) != 0
+}
+
+func (k *Key) Secret() bool {
+ return C.key_secret(k.k) != 0
+}
+
+func (k *Key) CanAuthenticate() bool {
+ return C.key_can_authenticate(k.k) != 0
+}
+
+func (k *Key) IsQualified() bool {
+ return C.key_is_qualified(k.k) != 0
+}
+
+func (k *Key) Protocol() Protocol {
+ return Protocol(k.k.protocol)
+}
+
+func (k *Key) IssuerSerial() string {
+ return C.GoString(k.k.issuer_serial)
+}
+
+func (k *Key) IssuerName() string {
+ return C.GoString(k.k.issuer_name)
+}
+
+func (k *Key) ChainID() string {
+ return C.GoString(k.k.chain_id)
+}
+
+func (k *Key) OwnerTrust() Validity {
+ return Validity(k.k.owner_trust)
+}
+
+func (k *Key) SubKeys() *SubKey {
+ if k.k.subkeys == nil {
+ return nil
+ }
+ return &SubKey{k: k.k.subkeys, parent: k}
+}
+
+func (k *Key) UserIDs() *UserID {
+ if k.k.uids == nil {
+ return nil
+ }
+ return &UserID{u: k.k.uids, parent: k}
+}
+
+func (k *Key) KeyListMode() KeyListMode {
+ return KeyListMode(k.k.keylist_mode)
+}
+
+type SubKey struct {
+ k C.gpgme_subkey_t
+ parent *Key // make sure the key is not released when we have a reference to a subkey
+}
+
+func (k *SubKey) Next() *SubKey {
+ if k.k.next == nil {
+ return nil
+ }
+ return &SubKey{k: k.k.next, parent: k.parent}
+}
+
+func (k *SubKey) Revoked() bool {
+ return C.subkey_revoked(k.k) != 0
+}
+
+func (k *SubKey) Expired() bool {
+ return C.subkey_expired(k.k) != 0
+}
+
+func (k *SubKey) Disabled() bool {
+ return C.subkey_disabled(k.k) != 0
+}
+
+func (k *SubKey) Invalid() bool {
+ return C.subkey_invalid(k.k) != 0
+}
+
+func (k *SubKey) Secret() bool {
+ return C.subkey_secret(k.k) != 0
+}
+
+func (k *SubKey) KeyID() string {
+ return C.GoString(k.k.keyid)
+}
+
+func (k *SubKey) Fingerprint() string {
+ return C.GoString(k.k.fpr)
+}
+
+func (k *SubKey) Created() time.Time {
+ if k.k.timestamp <= 0 {
+ return time.Time{}
+ }
+ return time.Unix(int64(k.k.timestamp), 0)
+}
+
+func (k *SubKey) Expires() time.Time {
+ if k.k.expires <= 0 {
+ return time.Time{}
+ }
+ return time.Unix(int64(k.k.expires), 0)
+}
+
+func (k *SubKey) CardNumber() string {
+ return C.GoString(k.k.card_number)
+}
+
+type UserID struct {
+ u C.gpgme_user_id_t
+ parent *Key // make sure the key is not released when we have a reference to a user ID
+}
+
+func (u *UserID) Next() *UserID {
+ if u.u.next == nil {
+ return nil
+ }
+ return &UserID{u: u.u.next, parent: u.parent}
+}
+
+func (u *UserID) Revoked() bool {
+ return C.uid_revoked(u.u) != 0
+}
+
+func (u *UserID) Invalid() bool {
+ return C.uid_invalid(u.u) != 0
+}
+
+func (u *UserID) Validity() Validity {
+ return Validity(u.u.validity)
+}
+
+func (u *UserID) UID() string {
+ return C.GoString(u.u.uid)
+}
+
+func (u *UserID) Name() string {
+ return C.GoString(u.u.name)
+}
+
+func (u *UserID) Comment() string {
+ return C.GoString(u.u.comment)
+}
+
+func (u *UserID) Email() string {
+ return C.GoString(u.u.email)
+}
+
+// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG.
+// os.Unsetenv should be enough, but it only reaches the underlying C library (which gpgme uses) if cgo is involved,
+// and cgo can't be used in tests. So, provide this helper for test initialization.
+func unsetenvGPGAgentInfo() {
+ v := C.CString("GPG_AGENT_INFO")
+ defer C.free(unsafe.Pointer(v))
+ C.unsetenv(v)
+}
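
Finally, a small key-listing sketch against the API above (the pattern is hypothetical; a local GnuPG keyring and an installed gpgme library are assumed):

    package main

    import (
        "fmt"
        "log"

        "github.com/mtrmac/gpgme"
    )

    func main() {
        // List public keys matching a pattern; pass true to list secret keys.
        keys, err := gpgme.FindKeys("alice@example.com", false)
        if err != nil {
            log.Fatal(err)
        }
        for _, k := range keys {
            // Subkeys form a linked list, walked via Next().
            for sk := k.SubKeys(); sk != nil; sk = sk.Next() {
                fmt.Printf("%s revoked=%v expires=%v\n",
                    sk.Fingerprint(), k.Revoked(), sk.Expires())
            }
        }
    }
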
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/github.com/opencontainers/go-digest/LICENSE.code
new file mode 100644
index 000000000..0ea3ff81e
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/LICENSE.code
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs
new file mode 100644
index 000000000..e26cd4fc8
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material,
+     including for purposes of Section 3(b); and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md
new file mode 100644
index 000000000..0f5a04092
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/README.md
@@ -0,0 +1,104 @@
+# go-digest
+
+[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest)
+
+Common digest package used across the container ecosystem.
+
+Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information.
+
+# What is a digest?
+
+A digest is just a hash.
+
+The most common use case for a digest is to create a content
+identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
+systems:
+
+```go
+id := digest.FromBytes([]byte("my content"))
+```
+
+In the example above, the id can be used to uniquely identify
+the byte slice "my content". This allows two disparate applications
+to agree on a verifiable identifier without having to trust one
+another.
+
+An identifying digest can be verified, as follows:
+
+```go
+if id != digest.FromBytes([]byte("my content")) {
+ return errors.New("the content has changed!")
+}
+```
+
+A `Verifier` type can be used to handle cases where an `io.Reader`
+makes more sense:
+
+```go
+rd := getContent()
+verifier := id.Verifier()
+io.Copy(verifier, rd)
+
+if !verifier.Verified() {
+ return errors.New("the content has changed!")
+}
+```
+
+Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
+can power a rich, safe, content distribution system.
+
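+As a toy sketch of that idea (the leaf contents below are invented for
+illustration), a parent object can embed its children's digests, so
+verifying the parent transitively pins the children:
+
+```go
+childA := digest.FromString("leaf A")
+childB := digest.FromString("leaf B")
+
+// The parent's content includes its children's digests, so the
+// parent's digest changes whenever any child changes.
+parent := digest.FromString(childA.String() + "\n" + childB.String())
+```
+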
+# Usage
+
+While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is
+considered the best resource, a few important items need to be called
+out when using this package.
+
+1. Make sure to import the hash implementations into your application
+ or the package will panic. You should have something like the
+ following in the main (or other entrypoint) of your application:
+
+ ```go
+ import (
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ )
+ ```
+   This may seem inconvenient but it allows you to replace the hash
+   implementations with others, such as https://github.com/stevvooe/resumable.
+
+2. Even though `digest.Digest` may be assembled directly as a string,
+   _always_ verify your input with `digest.Parse` or use `Digest.Validate`
+   when accepting untrusted input (see the sketch below). While there
+   are measures to avoid common problems, this will ensure you have
+   valid digests in the rest of your application.
+
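+A minimal sketch of that validation, where `userInput` is a hypothetical
+untrusted string (not part of this package):
+
+```go
+d, err := digest.Parse(userInput)
+if err != nil {
+	return err // reject malformed or unsupported digests
+}
+// d is now guaranteed to be well-formed, e.g. "sha256:<hex>".
+```
+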
+# Stability
+
+The Go API, at this stage, is considered stable, unless otherwise noted.
+
+As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
+
+# Contributing
+
+This package is considered fairly complete. It has been in production
+in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism. If you think there is a
+missing feature, please file a bug clearly describing the problem and
+the alternatives you tried before submitting a PR.
+
+# Reporting security issues
+
+Please DO NOT file a public issue, instead send your report privately to
+security@opencontainers.org.
+
+The maintainers take security seriously. If you discover a security issue,
+please bring it to their attention right away!
+
+If you are reporting a security issue, do not create an issue or file a pull
+request on GitHub. Instead, disclose the issue responsibly by sending an email
+to security@opencontainers.org (which is inhabited only by the maintainers of
+the various OCI projects).
+
+# Copyright and license
+
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution-ShareAlike 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
new file mode 100644
index 000000000..bdff42d92
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -0,0 +1,158 @@
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+ "crypto"
+ "fmt"
+ "hash"
+ "io"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+ SHA256 Algorithm = "sha256" // sha256 with hex encoding
+ SHA384 Algorithm = "sha384" // sha384 with hex encoding
+ SHA512 Algorithm = "sha512" // sha512 with hex encoding
+
+ // Canonical is the primary digest algorithm used with the distribution
+ // project. Other digests may be used but this one is the primary storage
+ // digest.
+ Canonical = SHA256
+)
+
+var (
+ // TODO(stevvooe): Follow the pattern of the standard crypto package for
+ // registration of digests. Effectively, we are a registerable set and
+ // common symbol access.
+
+ // algorithms maps values to hash.Hash implementations. Other algorithms
+ // may be available but they cannot be calculated by the digest package.
+ algorithms = map[Algorithm]crypto.Hash{
+ SHA256: crypto.SHA256,
+ SHA384: crypto.SHA384,
+ SHA512: crypto.SHA512,
+ }
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, Digester and Hash will panic.
+func (a Algorithm) Available() bool {
+ h, ok := algorithms[a]
+ if !ok {
+ return false
+ }
+
+ // check availability of the hash, as well
+ return h.Available()
+}
+
+func (a Algorithm) String() string {
+ return string(a)
+}
+
+// Size returns the number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+ h, ok := algorithms[a]
+ if !ok {
+ return 0
+ }
+ return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+ if value == "" {
+ *a = Canonical
+ } else {
+ // just do a type conversion, support is queried with Available.
+ *a = Algorithm(value)
+ }
+
+ if !a.Available() {
+ return ErrDigestUnsupported
+ }
+
+ return nil
+}
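+
+// For example (an illustrative sketch, not part of this package):
+//
+//	var alg = Canonical
+//	flag.Var(&alg, "algorithm", "digest algorithm to use")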
+
+// Digester returns a new digester for the specified algorithm. If the
+// algorithm is not available, constructing the digester will panic (via
+// Algorithm.Hash). Check Available before calling Digester.
+func (a Algorithm) Digester() Digester {
+ return &digester{
+ alg: a,
+ hash: a.Hash(),
+ }
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+ if !a.Available() {
+ // Empty algorithm string is invalid
+ if a == "" {
+			panic("empty digest algorithm, validate before calling Algorithm.Hash()")
+ }
+
+ // NOTE(stevvooe): A missing hash is usually a programming error that
+ // must be resolved at compile time. We don't import in the digest
+ // package to allow users to choose their hash implementation (such as
+ // when using stevvooe/resumable or a hardware accelerated package).
+ //
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+ panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+ }
+
+ return algorithms[a].New()
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+ digester := a.Digester()
+
+ if _, err := io.Copy(digester.Hash(), rd); err != nil {
+ return "", err
+ }
+
+ return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+ digester := a.Digester()
+
+ if _, err := digester.Hash().Write(p); err != nil {
+ // Writes to a Hash should never fail. None of the existing
+ // hash implementations in the stdlib or hashes vendored
+ // here can return errors from Write. Having a panic in this
+ // condition instead of having FromBytes return an error value
+ // avoids unnecessary error handling paths in all callers.
+ panic("write to hash function returned error: " + err.Error())
+ }
+
+ return digester.Digest()
+}
+
+// FromString digests the string input and returns a Digest.
+func (a Algorithm) FromString(s string) Digest {
+ return a.FromBytes([]byte(s))
+}
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
new file mode 100644
index 000000000..69e1d2b54
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -0,0 +1,154 @@
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "regexp"
+ "strings"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows the digest to be abstracted behind this type so that code
+// can work purely in those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+ return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+ return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex-encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+ return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat is returned when the digest format is invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestInvalidLength is returned when the digest has an invalid length.
+	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+	// ErrDigestUnsupported is returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// Parse parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func Parse(s string) (Digest, error) {
+ d := Digest(s)
+ return d, d.Validate()
+}
+
+// FromReader consumes the content of rd until io.EOF, returning the canonical digest.
+func FromReader(rd io.Reader) (Digest, error) {
+ return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+ return Canonical.FromBytes(p)
+}
+
+// FromString digests the input and returns a Digest.
+func FromString(s string) Digest {
+ return Canonical.FromString(s)
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+ s := string(d)
+
+ i := strings.Index(s, ":")
+
+ // validate i then run through regexp
+ if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
+ return ErrDigestInvalidFormat
+ }
+
+ algorithm := Algorithm(s[:i])
+ if !algorithm.Available() {
+ return ErrDigestUnsupported
+ }
+
+	// Digests must always be hex-encoded, ensuring that their hex portion
+	// will always be size*2 characters.
+ if algorithm.Size()*2 != len(s[i+1:]) {
+ return ErrDigestInvalidLength
+ }
+
+ return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+ return Algorithm(d[:d.sepIndex()])
+}
+
+// Verifier returns a writer object that can be used to verify a stream of
+// content against the digest. If the digest is invalid, the method will panic.
+func (d Digest) Verifier() Verifier {
+ return hashVerifier{
+ hash: d.Algorithm().Hash(),
+ digest: d,
+ }
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string {
+ return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string {
+ return string(d)
+}
+
+func (d Digest) sepIndex() int {
+ i := strings.Index(string(d), ":")
+
+ if i < 0 {
+ panic(fmt.Sprintf("no ':' separator in digest %q", d))
+ }
+
+ return i
+}
diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go
new file mode 100644
index 000000000..36fa2728e
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/digester.go
@@ -0,0 +1,39 @@
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import "hash"
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+ Hash() hash.Hash // provides direct access to underlying hash instance.
+ Digest() Digest
+}
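+
+// A typical use (an illustrative sketch; rd is any io.Reader):
+//
+//	d := Canonical.Digester()
+//	if _, err := io.Copy(d.Hash(), rd); err != nil {
+//		return err
+//	}
+//	dgst := d.Digest()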
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+ alg Algorithm
+ hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+ return d.hash
+}
+
+func (d *digester) Digest() Digest {
+ return NewDigest(d.alg, d.hash)
+}
diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go
new file mode 100644
index 000000000..491ea1ef1
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/doc.go
@@ -0,0 +1,56 @@
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+// <algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// In this case, the string "sha256" is the algorithm and the hex bytes are
+// the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
+//
+package digest
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go
new file mode 100644
index 000000000..32125e918
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/verifiers.go
@@ -0,0 +1,45 @@
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+ "hash"
+ "io"
+)
+
+// Verifier presents a general verification interface to be used with message
+// digests and other byte stream verifications. Users instantiate a Verifier
+// from one of the various methods, write the data under test to it, then check
+// the result with the Verified method.
+type Verifier interface {
+ io.Writer
+
+ // Verified will return true if the content written to Verifier matches
+ // the digest.
+ Verified() bool
+}
+
+type hashVerifier struct {
+ digest Digest
+ hash hash.Hash
+}
+
+func (hv hashVerifier) Write(p []byte) (n int, err error) {
+ return hv.hash.Write(p)
+}
+
+func (hv hashVerifier) Verified() bool {
+ return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
+}
diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE
new file mode 100644
index 000000000..9fdc20fdb
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2016 The Linux Foundation.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/image-spec/README.md b/vendor/github.com/opencontainers/image-spec/README.md
new file mode 100644
index 000000000..5ab5554e4
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/README.md
@@ -0,0 +1,167 @@
+# OCI Image Format Specification
+<div>
+<a href="https://travis-ci.org/opencontainers/image-spec">
+<img src="https://travis-ci.org/opencontainers/image-spec.svg?branch=master" />
+</a>
+</div>
+
+The OCI Image Format project creates and maintains the software shipping container image format spec (OCI Image Format).
+
+**[The specification can be found here](spec.md).**
+
+This repository also provides [Go types](specs-go), [intra-blob validation tooling, and JSON Schema](schema).
+The Go types and validation should be compatible with the current Go release; earlier Go releases are not supported.
+
+Additional documentation about how this group operates:
+
+- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md)
+- [Roadmap](#roadmap)
+- [Releases](RELEASES.md)
+- [Project Documentation](project.md)
+
+The _optional_ and _base_ layers of all OCI projects are tracked in the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
+
+## Running an OCI Image
+
+The OCI Image Format partner project is the [OCI Runtime Spec project](https://github.com/opencontainers/runtime-spec).
+The Runtime Specification outlines how to run a "[filesystem bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md)" that is unpacked on disk.
+At a high-level an OCI implementation would download an OCI Image then unpack that image into an OCI Runtime filesystem bundle.
+At this point the OCI Runtime Bundle would be run by an OCI Runtime.
+
+This entire workflow supports the UX that users have come to expect from container engines like Docker and rkt: primarily, the ability to run an image with no additional arguments:
+
+* docker run example.com/org/app:v1.0.0
+* rkt run example.com/org/app,version=v1.0.0
+
+To support this UX, the OCI Image Format contains sufficient information to launch the application on the target platform (e.g. command, arguments, environment variables).
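+
+The sketch below decodes a minimal, made-up image config into the [Go types](specs-go) vendored in this repository; the JSON literal is invented for illustration.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func main() {
+	raw := []byte(`{
+		"architecture": "amd64",
+		"os": "linux",
+		"config": {"Entrypoint": ["/app"], "Env": ["PATH=/usr/bin"]},
+		"rootfs": {"type": "layers", "diff_ids": []}
+	}`)
+
+	var img v1.Image
+	if err := json.Unmarshal(raw, &img); err != nil {
+		panic(err)
+	}
+	// Prints the command and environment an engine would use at launch.
+	fmt.Println(img.Config.Entrypoint, img.Config.Env)
+}
+```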
+
+## FAQ
+
+**Q: Why doesn't this project mention distribution?**
+
+A: Distribution, for example using HTTP as both Docker v2.2 and AppC do today, is currently out of scope on the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
+There has been [some discussion on the TOB mailing list](https://groups.google.com/a/opencontainers.org/d/msg/tob/A3JnmI-D-6Y/tLuptPDHAgAJ) to make distribution an optional layer, but this topic is a work in progress.
+
+**Q: What happens to AppC or Docker Image Formats?**
+
+A: Existing formats can continue to be a proving ground for technologies, as needed.
+The OCI Image Format project strives to provide a dependable open specification that can be shared between different tools and be evolved for years or decades of compatibility, as the deb and rpm formats have.
+
+Find more [FAQ on the OCI site](https://www.opencontainers.org/faq).
+
+## Roadmap
+
+The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the OCI v1.0.0 release in late 2016.
+
+# Contributing
+
+Development happens on GitHub for the spec.
+Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
+
+The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository.
+
+## Discuss your design
+
+The project welcomes submissions, but please let everyone know what you are working on.
+
+Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
+This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
+It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.
+
+Typos and grammatical errors can go straight to a pull-request.
+When in doubt, start on the [mailing-list](#mailing-list).
+
+## Weekly Call
+
+The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific).
+Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1-415-968-0849 (no PIN needed).
+An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
+Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes].
+
+## Mailing List
+
+You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev).
+
+## IRC
+
+OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).
+
+## Markdown style
+
+To keep consistency throughout the Markdown files in the Open Container spec all files should be formatted one sentence per line.
+This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length.
+For example, this paragraph will span three lines in the Markdown source.
+
+## Git commit
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
+The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe@gmail.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign off when creating the git commit via `git commit -s`.
+
+### Commit Style
+
+Simple house-keeping for clean git history.
+Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit).
+
+1. Separate the subject from body with a blank line
+2. Limit the subject line to 50 characters
+3. Capitalize the subject line
+4. Do not end the subject line with a period
+5. Use the imperative mood in the subject line
+6. Wrap the body at 72 characters
+7. Use the body to explain what and why vs. how
+ * If there was important/useful/essential conversation or information, copy or include a reference
+8. When possible, use one keyword to scope the change in the subject (e.g. "README: ...", "runtime: ...")
+
+
+[UberConference]: https://www.uberconference.com/opencontainers
+[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
+[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
new file mode 100644
index 000000000..35d810895
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+const (
+ // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339).
+ AnnotationCreated = "org.opencontainers.image.created"
+
+ // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string).
+ AnnotationAuthors = "org.opencontainers.image.authors"
+
+ // AnnotationURL is the annotation key for the URL to find more information on the image.
+ AnnotationURL = "org.opencontainers.image.url"
+
+ // AnnotationDocumentation is the annotation key for the URL to get documentation on the image.
+ AnnotationDocumentation = "org.opencontainers.image.documentation"
+
+ // AnnotationSource is the annotation key for the URL to get source code for building the image.
+ AnnotationSource = "org.opencontainers.image.source"
+
+ // AnnotationVersion is the annotation key for the version of the packaged software.
+ // The version MAY match a label or tag in the source code repository.
+ // The version MAY be Semantic versioning-compatible.
+ AnnotationVersion = "org.opencontainers.image.version"
+
+ // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software.
+ AnnotationRevision = "org.opencontainers.image.revision"
+
+ // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual.
+ AnnotationVendor = "org.opencontainers.image.vendor"
+
+ // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression.
+ AnnotationLicenses = "org.opencontainers.image.licenses"
+
+ // AnnotationRefName is the annotation key for the name of the reference for a target.
+ // SHOULD only be considered valid when on descriptors on `index.json` within image layout.
+ AnnotationRefName = "org.opencontainers.image.ref.name"
+
+ // AnnotationTitle is the annotation key for the human-readable title of the image.
+ AnnotationTitle = "org.opencontainers.image.title"
+
+ // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
+ AnnotationDescription = "org.opencontainers.image.description"
+)
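+
+// Example (an illustrative sketch, not defined in this file): annotation
+// keys are typically attached to OCI manifests or descriptors as a
+// map[string]string, e.g.
+//
+//	annotations := map[string]string{
+//		AnnotationCreated: "2017-11-01T11:24:59Z",
+//		AnnotationVersion: "v1.0.0",
+//	}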
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
new file mode 100644
index 000000000..fe799bd69
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
+type ImageConfig struct {
+ // User defines the username or UID which the process in the container should run as.
+ User string `json:"User,omitempty"`
+
+	// ExposedPorts is a set of ports to expose from a container running this image.
+ ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+
+ // Env is a list of environment variables to be used in a container.
+ Env []string `json:"Env,omitempty"`
+
+ // Entrypoint defines a list of arguments to use as the command to execute when the container starts.
+ Entrypoint []string `json:"Entrypoint,omitempty"`
+
+ // Cmd defines the default arguments to the entrypoint of the container.
+ Cmd []string `json:"Cmd,omitempty"`
+
+	// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
+ Volumes map[string]struct{} `json:"Volumes,omitempty"`
+
+ // WorkingDir sets the current working directory of the entrypoint process in the container.
+ WorkingDir string `json:"WorkingDir,omitempty"`
+
+ // Labels contains arbitrary metadata for the container.
+ Labels map[string]string `json:"Labels,omitempty"`
+
+ // StopSignal contains the system call signal that will be sent to the container to exit.
+ StopSignal string `json:"StopSignal,omitempty"`
+}
+
+// RootFS describes the layer content addresses of an image.
+type RootFS struct {
+ // Type is the type of the rootfs.
+ Type string `json:"type"`
+
+ // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
+ DiffIDs []digest.Digest `json:"diff_ids"`
+}
+
+// History describes the history of a layer.
+type History struct {
+ // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+ Created *time.Time `json:"created,omitempty"`
+
+ // CreatedBy is the command which created the layer.
+ CreatedBy string `json:"created_by,omitempty"`
+
+ // Author is the author of the build point.
+ Author string `json:"author,omitempty"`
+
+ // Comment is a custom message set when creating the layer.
+ Comment string `json:"comment,omitempty"`
+
+ // EmptyLayer is used to mark if the history item created a filesystem diff.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Image is the JSON structure which describes some basic information about the image.
+// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
+type Image struct {
+ // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6.
+ Created *time.Time `json:"created,omitempty"`
+
+ // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
+ Author string `json:"author,omitempty"`
+
+ // Architecture is the CPU architecture which the binaries in this image are built to run on.
+ Architecture string `json:"architecture"`
+
+ // OS is the name of the operating system which the image is built to run on.
+ OS string `json:"os"`
+
+ // Config defines the execution parameters which should be used as a base when running a container using the image.
+ Config ImageConfig `json:"config,omitempty"`
+
+ // RootFS references the layer content addresses used by the image.
+ RootFS RootFS `json:"rootfs"`
+
+ // History describes the history of each layer.
+ History []History `json:"history,omitempty"`
+}
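+
+// A minimal decoding sketch (illustrative only): an image configuration blob,
+// fetched via the digest named in a manifest's Config descriptor, unmarshals
+// directly into Image:
+//
+//	var img Image
+//	if err := json.Unmarshal(blob, &img); err != nil {
+//		// handle a malformed config blob
+//	}
+//	fmt.Println(img.Architecture, img.OS, len(img.RootFS.DiffIDs))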
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
new file mode 100644
index 000000000..6e442a085
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
@@ -0,0 +1,64 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import digest "github.com/opencontainers/go-digest"
+
+// Descriptor describes the disposition of targeted content.
+// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype
+// when marshalled to JSON.
+type Descriptor struct {
+ // MediaType is the media type of the object this schema refers to.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Digest is the digest of the targeted content.
+ Digest digest.Digest `json:"digest"`
+
+ // Size specifies the size in bytes of the blob.
+ Size int64 `json:"size"`
+
+	// URLs specifies a list of URLs from which this object MAY be downloaded.
+ URLs []string `json:"urls,omitempty"`
+
+ // Annotations contains arbitrary metadata relating to the targeted content.
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // Platform describes the platform which the image in the manifest runs on.
+ //
+ // This should only be used when referring to a manifest.
+ Platform *Platform `json:"platform,omitempty"`
+}
+
+// Platform describes the platform which the image in the manifest runs on.
+type Platform struct {
+ // Architecture field specifies the CPU architecture, for example
+ // `amd64` or `ppc64`.
+ Architecture string `json:"architecture"`
+
+ // OS specifies the operating system, for example `linux` or `windows`.
+ OS string `json:"os"`
+
+ // OSVersion is an optional field specifying the operating system
+ // version, for example on Windows `10.0.14393.1066`.
+ OSVersion string `json:"os.version,omitempty"`
+
+ // OSFeatures is an optional field specifying an array of strings,
+ // each listing a required OS feature (for example on Windows `win32k`).
+ OSFeatures []string `json:"os.features,omitempty"`
+
+ // Variant is an optional field specifying a variant of the CPU, for
+ // example `v7` to specify ARMv7 when architecture is `arm`.
+ Variant string `json:"variant,omitempty"`
+}
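+
+// A minimal sketch (illustrative only; the digest and size are placeholders):
+// a descriptor ties a media type to the digest and size of a blob, e.g. a
+// gzipped layer:
+//
+//	d := Descriptor{
+//		MediaType: MediaTypeImageLayerGzip,
+//		Digest:    "sha256:...",
+//		Size:      978,
+//	}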
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
new file mode 100644
index 000000000..4e6c4b236
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import "github.com/opencontainers/image-spec/specs-go"
+
+// Index references manifests for various platforms.
+// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON.
+type Index struct {
+ specs.Versioned
+
+ // Manifests references platform specific manifests.
+ Manifests []Descriptor `json:"manifests"`
+
+ // Annotations contains arbitrary metadata for the image index.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
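+
+// A minimal construction sketch (illustrative only; the digest and size are
+// placeholders): an index references one manifest descriptor per platform:
+//
+//	idx := Index{
+//		Versioned: specs.Versioned{SchemaVersion: 2},
+//		Manifests: []Descriptor{{
+//			MediaType: MediaTypeImageManifest,
+//			Digest:    "sha256:...",
+//			Size:      7143,
+//			Platform:  &Platform{Architecture: "amd64", OS: "linux"},
+//		}},
+//	}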
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
new file mode 100644
index 000000000..fc79e9e0d
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+const (
+	// ImageLayoutFile is the file name of the OCI image layout file
+ ImageLayoutFile = "oci-layout"
+ // ImageLayoutVersion is the version of ImageLayout
+ ImageLayoutVersion = "1.0.0"
+)
+
+// ImageLayout is the structure in the "oci-layout" file, found in the root
+// of an OCI Image-layout directory.
+type ImageLayout struct {
+ Version string `json:"imageLayoutVersion"`
+}
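+
+// For illustration, the "oci-layout" file is just the JSON serialization of
+// ImageLayout, so its expected content is:
+//
+//	{"imageLayoutVersion": "1.0.0"}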
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
new file mode 100644
index 000000000..7ff32c40b
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import "github.com/opencontainers/image-spec/specs-go"
+
+// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON.
+type Manifest struct {
+ specs.Versioned
+
+ // Config references a configuration object for a container, by digest.
+ // The referenced configuration object is a JSON blob that the runtime uses to set up the container.
+ Config Descriptor `json:"config"`
+
+ // Layers is an indexed list of layers referenced by the manifest.
+ Layers []Descriptor `json:"layers"`
+
+ // Annotations contains arbitrary metadata for the image manifest.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
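+
+// A minimal construction sketch (illustrative only; digests and sizes are
+// placeholders): a manifest pairs one config descriptor with an ordered list
+// of layer descriptors:
+//
+//	m := Manifest{
+//		Versioned: specs.Versioned{SchemaVersion: 2},
+//		Config: Descriptor{
+//			MediaType: MediaTypeImageConfig,
+//			Digest:    "sha256:...",
+//			Size:      452,
+//		},
+//		Layers: []Descriptor{{
+//			MediaType: MediaTypeImageLayerGzip,
+//			Digest:    "sha256:...",
+//			Size:      978,
+//		}},
+//	}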
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
new file mode 100644
index 000000000..bad7bb97f
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+const (
+ // MediaTypeDescriptor specifies the media type for a content descriptor.
+ MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json"
+
+ // MediaTypeLayoutHeader specifies the media type for the oci-layout.
+ MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json"
+
+ // MediaTypeImageManifest specifies the media type for an image manifest.
+ MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json"
+
+ // MediaTypeImageIndex specifies the media type for an image index.
+ MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json"
+
+ // MediaTypeImageLayer is the media type used for layers referenced by the manifest.
+ MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar"
+
+ // MediaTypeImageLayerGzip is the media type used for gzipped layers
+ // referenced by the manifest.
+ MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip"
+
+ // MediaTypeImageLayerNonDistributable is the media type for layers referenced by
+ // the manifest but with distribution restrictions.
+ MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
+
+ // MediaTypeImageLayerNonDistributableGzip is the media type for
+ // gzipped layers referenced by the manifest but with distribution
+ // restrictions.
+ MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+
+ // MediaTypeImageConfig specifies the media type for the image configuration.
+ MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
new file mode 100644
index 000000000..e3eee29b4
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 0
+
+	// VersionDev indicates a development branch. Releases will be an empty string.
+ VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
new file mode 100644
index 000000000..58a1510f3
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type Versioned struct {
+ // SchemaVersion is the image manifest schema that this image follows
+ SchemaVersion int `json:"schemaVersion"`
+}
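+
+// A minimal version-sniffing sketch (illustrative only): decode unknown
+// content against Versioned first, then choose the concrete type:
+//
+//	var v Versioned
+//	if err := json.Unmarshal(raw, &v); err != nil {
+//		// not valid JSON at all
+//	}
+//	switch v.SchemaVersion {
+//	case 2:
+//		// decode as an OCI image manifest or index
+//	default:
+//		// unsupported schema version
+//	}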
diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE
new file mode 100644
index 000000000..27448585a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE
new file mode 100644
index 000000000..5c97abce4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/NOTICE
@@ -0,0 +1,17 @@
+runc
+
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (http://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see http://www.bis.doc.gov
+
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/opencontainers/runc/README.md b/vendor/github.com/opencontainers/runc/README.md
new file mode 100644
index 000000000..eabfb982b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/README.md
@@ -0,0 +1,244 @@
+# runc
+
+[![Build Status](https://travis-ci.org/opencontainers/runc.svg?branch=master)](https://travis-ci.org/opencontainers/runc)
+[![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runc)](https://goreportcard.com/report/github.com/opencontainers/runc)
+[![GoDoc](https://godoc.org/github.com/opencontainers/runc?status.svg)](https://godoc.org/github.com/opencontainers/runc)
+
+## Introduction
+
+`runc` is a CLI tool for spawning and running containers according to the OCI specification.
+
+## Releases
+
+`runc` depends on and tracks the [runtime-spec](https://github.com/opencontainers/runtime-spec) repository.
+We will try to make sure that `runc` and the OCI specification major versions stay in lockstep.
+This means that `runc` 1.0.0 should implement the 1.0 version of the specification.
+
+You can find official releases of `runc` on the [release](https://github.com/opencontainers/runc/releases) page.
+
+### Security
+
+If you wish to report a security issue, please disclose the issue responsibly
+to security@opencontainers.org.
+
+## Building
+
+`runc` currently supports the Linux platform on a variety of architectures.
+It must be built with Go version 1.6 or higher for some features to function properly.
+
+In order to enable seccomp support you will need to install `libseccomp` on your platform.
+> e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
+
+Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.
+
+```bash
+# create a 'github.com/opencontainers' directory in your GOPATH/src
+cd github.com/opencontainers
+git clone https://github.com/opencontainers/runc
+cd runc
+
+make
+sudo make install
+```
+
+`runc` will be installed to `/usr/local/sbin/runc` on your system.
+
+#### Build Tags
+
+`runc` supports optional build tags for compiling support of various features.
+To add build tags, set the `BUILDTAGS` variable when invoking make.
+
+```bash
+make BUILDTAGS='seccomp apparmor'
+```
+
+| Build Tag | Feature | Dependency |
+|-----------|------------------------------------|-------------|
+| seccomp | Syscall filtering | libseccomp |
+| selinux | selinux process and mount labeling | <none> |
+| apparmor | apparmor profile support | libapparmor |
+| ambient | ambient capability support | kernel 4.3 |
+
+
+### Running the test suite
+
+`runc` currently supports running its test suite via Docker.
+To run the suite just type `make test`.
+
+```bash
+make test
+```
+
+There are additional make targets for running the tests outside of a container, but this is not recommended, as the tests are written with the expectation that they can write and remove files anywhere.
+
+You can run a specific test case by setting the `TESTFLAGS` variable.
+
+```bash
+make test TESTFLAGS="-run=SomeTestFunction"
+```
+
+### Dependency Management
+
+`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependency management.
+Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update
+dependencies.
+
+## Using runc
+
+### Creating an OCI Bundle
+
+In order to use runc you must have your container in the format of an OCI bundle.
+If you have Docker installed you can use its `export` method to acquire a root filesystem from an existing Docker container.
+
+```bash
+# create the topmost bundle directory
+mkdir /mycontainer
+cd /mycontainer
+
+# create the rootfs directory
+mkdir rootfs
+
+# export busybox via Docker into the rootfs directory
+docker export $(docker create busybox) | tar -C rootfs -xvf -
+```
+
+After the root filesystem is populated, generate a spec in the format of a `config.json` file inside your bundle.
+`runc` provides a `spec` command to generate a base template spec that you are then able to edit.
+To find features and documentation for fields in the spec please refer to the [specs](https://github.com/opencontainers/runtime-spec) repository.
+
+```bash
+runc spec
+```
+
+### Running Containers
+
+Assuming you have an OCI bundle from the previous step, you can execute the container in two different ways.
+
+The first way is to use the convenience command `run`, which handles creating, starting, and deleting the container after it exits.
+
+```bash
+# run as root
+cd /mycontainer
+runc run mycontainerid
+```
+
+If you used the unmodified `runc spec` template this should give you a `sh` session inside the container.
+
+The second way to start a container is by using the spec's lifecycle operations.
+This gives you more power over how the container is created and managed while it is running.
+These operations also launch the container in the background, so for the simple examples here you will have to edit the `config.json` to remove the `terminal` setting.
+The process field in your `config.json` should look like the example below, with `"terminal": false` and `"args": ["sleep", "5"]`.
+
+
+```json
+ "process": {
+ "terminal": false,
+ "user": {
+ "uid": 0,
+ "gid": 0
+ },
+ "args": [
+ "sleep", "5"
+ ],
+ "env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "TERM=xterm"
+ ],
+ "cwd": "/",
+ "capabilities": {
+ "bounding": [
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE"
+ ],
+ "effective": [
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE"
+ ],
+ "inheritable": [
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE"
+ ],
+ "permitted": [
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE"
+ ],
+ "ambient": [
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE"
+ ]
+ },
+ "rlimits": [
+ {
+ "type": "RLIMIT_NOFILE",
+ "hard": 1024,
+ "soft": 1024
+ }
+ ],
+ "noNewPrivileges": true
+ },
+```
+
+Now you can go through the lifecycle operations in your shell.
+
+
+```bash
+# run as root
+cd /mycontainer
+runc create mycontainerid
+
+# verify that the container has been created and is in the "created" state
+runc list
+
+# start the process inside the container
+runc start mycontainerid
+
+# after 5 seconds, see that the container has exited and is now in the stopped state
+runc list
+
+# now delete the container
+runc delete mycontainerid
+```
+
+This adds more complexity, but it allows higher-level systems to manage runc and provides points during the container's creation where various settings can be configured after the container has been created and/or before it is deleted.
+This is commonly used to set up the container's network stack after `create` but before `start`, where the user-defined process will be running, as sketched below.
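+
+A minimal sketch of how a supervisor might drive this window from Go; `setupNetwork` is a hypothetical placeholder, not part of runc:
+
+```go
+package main
+
+import (
+	"log"
+	"os/exec"
+)
+
+// setupNetwork is a hypothetical stand-in for whatever configuration a
+// supervisor performs between create and start (veth pairs, routes, etc.).
+func setupNetwork(id string) error { return nil }
+
+func main() {
+	id := "mycontainerid"
+	// create the container (run from the bundle directory);
+	// the user's process is not running yet
+	if err := exec.Command("runc", "create", id).Run(); err != nil {
+		log.Fatal(err)
+	}
+	// the container now exists in the "created" state: set up networking
+	if err := setupNetwork(id); err != nil {
+		log.Fatal(err)
+	}
+	// start the user-defined process inside the container
+	if err := exec.Command("runc", "start", id).Run(); err != nil {
+		log.Fatal(err)
+	}
+}
+```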
+
+#### Rootless containers
+`runc` can run containers without root privileges. This is called `rootless` mode. You need to pass a few extra parameters to `runc` in order to run rootless containers. See below and compare with the previous examples. Run the following commands as an ordinary user:
+```bash
+# Same as the first example
+mkdir ~/mycontainer
+cd ~/mycontainer
+mkdir rootfs
+docker export $(docker create busybox) | tar -C rootfs -xvf -
+
+# The --rootless parameter instructs runc spec to generate a configuration for a rootless container, which will allow you to run the container as a non-root user.
+runc spec --rootless
+
+# The --root parameter tells runc where to store the container state. It must be writable by the user.
+runc --root /tmp/runc run mycontainerid
+```
+
+#### Supervisors
+
+`runc` can be used with process supervisors and init systems to ensure that containers are restarted when they exit.
+An example systemd unit file looks something like this.
+
+```systemd
+[Unit]
+Description=Start My Container
+
+[Service]
+Type=forking
+ExecStart=/usr/local/sbin/runc run -d --pid-file /run/mycontainerid.pid mycontainerid
+ExecStopPost=/usr/local/sbin/runc delete mycontainerid
+WorkingDirectory=/mycontainer
+PIDFile=/run/mycontainerid.pid
+
+[Install]
+WantedBy=multi-user.target
+```
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/README.md b/vendor/github.com/opencontainers/runc/libcontainer/README.md
new file mode 100644
index 000000000..42f3efe56
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/README.md
@@ -0,0 +1,328 @@
+# libcontainer
+
+[![GoDoc](https://godoc.org/github.com/opencontainers/runc/libcontainer?status.svg)](https://godoc.org/github.com/opencontainers/runc/libcontainer)
+
+Libcontainer provides a native Go implementation for creating containers
+with namespaces, cgroups, capabilities, and filesystem access controls.
+It allows you to manage the lifecycle of the container, performing additional operations
+after the container is created.
+
+
+#### Container
+A container is a self-contained execution environment that shares the kernel of the
+host system and is (optionally) isolated from other containers in the system.
+
+#### Using libcontainer
+
+Because containers are spawned in a two-step process, you will need a binary that
+will be executed as the init process for the container. In libcontainer, the
+current binary (/proc/self/exe) is re-executed as the init process by passing it
+the argument "init". We call this first step "bootstrap", so you always need an
+"init" function as the entry point of the bootstrap.
+
+In addition to the Go init function, the early-stage bootstrap is handled by importing
+[nsenter](https://github.com/opencontainers/runc/blob/master/libcontainer/nsenter/README.md).
+
+```go
+import (
+ _ "github.com/opencontainers/runc/libcontainer/nsenter"
+)
+
+func init() {
+ if len(os.Args) > 1 && os.Args[1] == "init" {
+ runtime.GOMAXPROCS(1)
+ runtime.LockOSThread()
+ factory, _ := libcontainer.New("")
+ if err := factory.StartInitialization(); err != nil {
+ logrus.Fatal(err)
+ }
+ panic("--this line should have never been executed, congratulations--")
+ }
+}
+```
+
+Then, to create a container, you first have to initialize an instance of a factory
+that will handle the creation and initialization of the container.
+
+```go
+factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
+if err != nil {
+ logrus.Fatal(err)
+ return
+}
+```
+
+Once you have an instance of the factory created, you can build a configuration
+struct describing how the container is to be created. A sample would look similar to this:
+
+```go
+defaultMountFlags := unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
+config := &configs.Config{
+ Rootfs: "/your/path/to/rootfs",
+ Capabilities: &configs.Capabilities{
+ Bounding: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Effective: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Inheritable: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Permitted: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Ambient: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ },
+ Namespaces: configs.Namespaces([]configs.Namespace{
+ {Type: configs.NEWNS},
+ {Type: configs.NEWUTS},
+ {Type: configs.NEWIPC},
+ {Type: configs.NEWPID},
+ {Type: configs.NEWUSER},
+ {Type: configs.NEWNET},
+ }),
+ Cgroups: &configs.Cgroup{
+ Name: "test-container",
+ Parent: "system",
+ Resources: &configs.Resources{
+ MemorySwappiness: nil,
+ AllowAllDevices: nil,
+ AllowedDevices: configs.DefaultAllowedDevices,
+ },
+ },
+ MaskPaths: []string{
+ "/proc/kcore",
+ "/sys/firmware",
+ },
+ ReadonlyPaths: []string{
+ "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
+ },
+ Devices: configs.DefaultAutoCreatedDevices,
+ Hostname: "testing",
+ Mounts: []*configs.Mount{
+ {
+ Source: "proc",
+ Destination: "/proc",
+ Device: "proc",
+ Flags: defaultMountFlags,
+ },
+ {
+ Source: "tmpfs",
+ Destination: "/dev",
+ Device: "tmpfs",
+ Flags: unix.MS_NOSUID | unix.MS_STRICTATIME,
+ Data: "mode=755",
+ },
+ {
+ Source: "devpts",
+ Destination: "/dev/pts",
+ Device: "devpts",
+ Flags: unix.MS_NOSUID | unix.MS_NOEXEC,
+ Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
+ },
+ {
+ Device: "tmpfs",
+ Source: "shm",
+ Destination: "/dev/shm",
+ Data: "mode=1777,size=65536k",
+ Flags: defaultMountFlags,
+ },
+ {
+ Source: "mqueue",
+ Destination: "/dev/mqueue",
+ Device: "mqueue",
+ Flags: defaultMountFlags,
+ },
+ {
+ Source: "sysfs",
+ Destination: "/sys",
+ Device: "sysfs",
+ Flags: defaultMountFlags | unix.MS_RDONLY,
+ },
+ },
+ UidMappings: []configs.IDMap{
+ {
+ ContainerID: 0,
+ HostID: 1000,
+ Size: 65536,
+ },
+ },
+ GidMappings: []configs.IDMap{
+ {
+ ContainerID: 0,
+ HostID: 1000,
+ Size: 65536,
+ },
+ },
+ Networks: []*configs.Network{
+ {
+ Type: "loopback",
+ Address: "127.0.0.1/0",
+ Gateway: "localhost",
+ },
+ },
+ Rlimits: []configs.Rlimit{
+ {
+ Type: unix.RLIMIT_NOFILE,
+ Hard: uint64(1025),
+ Soft: uint64(1025),
+ },
+ },
+}
+```
+
+Once you have the configuration populated you can create a container:
+
+```go
+container, err := factory.Create("container-id", config)
+if err != nil {
+ logrus.Fatal(err)
+ return
+}
+```
+
+To spawn bash as the initial process inside the container and have the
+process's pid returned in order to wait on, signal, or kill the process:
+
+```go
+process := &libcontainer.Process{
+ Args: []string{"/bin/bash"},
+ Env: []string{"PATH=/bin"},
+ User: "daemon",
+ Stdin: os.Stdin,
+ Stdout: os.Stdout,
+ Stderr: os.Stderr,
+}
+
+err := container.Run(process)
+if err != nil {
+ container.Destroy()
+ logrus.Fatal(err)
+ return
+}
+
+// wait for the process to finish.
+_, err = process.Wait()
+if err != nil {
+ logrus.Fatal(err)
+}
+
+// destroy the container.
+container.Destroy()
+```
+
+Additional ways to interact with a running container are:
+
+```go
+// return all the pids for all processes running inside the container.
+processes, err := container.Processes()
+
+// get detailed cpu, memory, io, and network statistics for the container and
+// its processes.
+stats, err := container.Stats()
+
+// pause all processes inside the container.
+container.Pause()
+
+// resume all paused processes.
+container.Resume()
+
+// send signal to container's init process.
+container.Signal(signal)
+
+// update container resource constraints.
+container.Set(config)
+
+// get current status of the container.
+status, err := container.Status()
+
+// get current container's state information.
+state, err := container.State()
+```
+
+
+#### Checkpoint & Restore
+
+libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
+This lets you save the state of a process running inside a container to disk, and then restore
+that state into a new process, on the same machine or on another machine.
+
+`criu` version 1.5.2 or higher is required to use checkpoint and restore.
+If you don't already have `criu` installed, you can build it from source, following the
+[online instructions](http://criu.org/Installation). `criu` is also installed in the Docker image
+generated when building libcontainer with Docker.
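+
+A minimal checkpoint/restore sketch, assuming a `container` and `process` obtained as in the earlier examples (the `CriuOpts` fields shown are only a small subset of the available options):
+
+```go
+// save the container's state to an images directory on disk
+opts := &libcontainer.CriuOpts{
+	ImagesDirectory: "/tmp/criu-images",
+	LeaveRunning:    false,
+}
+if err := container.Checkpoint(opts); err != nil {
+	logrus.Fatal(err)
+}
+
+// later, restore that state into a new process
+if err := container.Restore(process, opts); err != nil {
+	logrus.Fatal(err)
+}
+```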
+
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
new file mode 100644
index 000000000..82ed1a68a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
@@ -0,0 +1,39 @@
+// +build apparmor,linux
+
+package apparmor
+
+// #cgo LDFLAGS: -lapparmor
+// #include <sys/apparmor.h>
+// #include <stdlib.h>
+import "C"
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "unsafe"
+)
+
+// IsEnabled returns true if apparmor is enabled for the host.
+func IsEnabled() bool {
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
+ if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
+ buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+ return err == nil && len(buf) > 1 && buf[0] == 'Y'
+ }
+ }
+ return false
+}
+
+// ApplyProfile will apply the profile with the specified name to the process after
+// the next exec.
+func ApplyProfile(name string) error {
+ if name == "" {
+ return nil
+ }
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ if _, err := C.aa_change_onexec(cName); err != nil {
+ return fmt.Errorf("apparmor failed to apply profile: %s", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go
new file mode 100644
index 000000000..d4110cf0b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go
@@ -0,0 +1,20 @@
+// +build !apparmor !linux
+
+package apparmor
+
+import (
+ "errors"
+)
+
+var ErrApparmorNotEnabled = errors.New("apparmor: config provided but apparmor not supported")
+
+func IsEnabled() bool {
+ return false
+}
+
+func ApplyProfile(name string) error {
+ if name != "" {
+ return ErrApparmorNotEnabled
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go
new file mode 100644
index 000000000..8981b2a2f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go
@@ -0,0 +1,114 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/syndtr/gocapability/capability"
+)
+
+const allCapabilityTypes = capability.CAPS | capability.BOUNDS | capability.AMBS
+
+var capabilityMap map[string]capability.Cap
+
+func init() {
+ capabilityMap = make(map[string]capability.Cap)
+ last := capability.CAP_LAST_CAP
+ // workaround for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+ if last == capability.Cap(63) {
+ last = capability.CAP_BLOCK_SUSPEND
+ }
+ for _, cap := range capability.List() {
+ if cap > last {
+ continue
+ }
+ capKey := fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))
+ capabilityMap[capKey] = cap
+ }
+}
+
+func newContainerCapList(capConfig *configs.Capabilities) (*containerCapabilities, error) {
+ bounding := []capability.Cap{}
+ for _, c := range capConfig.Bounding {
+ v, ok := capabilityMap[c]
+ if !ok {
+ return nil, fmt.Errorf("unknown capability %q", c)
+ }
+ bounding = append(bounding, v)
+ }
+ effective := []capability.Cap{}
+ for _, c := range capConfig.Effective {
+ v, ok := capabilityMap[c]
+ if !ok {
+ return nil, fmt.Errorf("unknown capability %q", c)
+ }
+ effective = append(effective, v)
+ }
+ inheritable := []capability.Cap{}
+ for _, c := range capConfig.Inheritable {
+ v, ok := capabilityMap[c]
+ if !ok {
+ return nil, fmt.Errorf("unknown capability %q", c)
+ }
+ inheritable = append(inheritable, v)
+ }
+ permitted := []capability.Cap{}
+ for _, c := range capConfig.Permitted {
+ v, ok := capabilityMap[c]
+ if !ok {
+ return nil, fmt.Errorf("unknown capability %q", c)
+ }
+ permitted = append(permitted, v)
+ }
+ ambient := []capability.Cap{}
+ for _, c := range capConfig.Ambient {
+ v, ok := capabilityMap[c]
+ if !ok {
+ return nil, fmt.Errorf("unknown capability %q", c)
+ }
+ ambient = append(ambient, v)
+ }
+ pid, err := capability.NewPid(os.Getpid())
+ if err != nil {
+ return nil, err
+ }
+ return &containerCapabilities{
+ bounding: bounding,
+ effective: effective,
+ inheritable: inheritable,
+ permitted: permitted,
+ ambient: ambient,
+ pid: pid,
+ }, nil
+}
+
+type containerCapabilities struct {
+ pid capability.Capabilities
+ bounding []capability.Cap
+ effective []capability.Cap
+ inheritable []capability.Cap
+ permitted []capability.Cap
+ ambient []capability.Cap
+}
+
+// ApplyBoundingSet sets the capability bounding set to those specified in the whitelist.
+func (c *containerCapabilities) ApplyBoundingSet() error {
+ c.pid.Clear(capability.BOUNDS)
+ c.pid.Set(capability.BOUNDS, c.bounding...)
+ return c.pid.Apply(capability.BOUNDS)
+}
+
+// ApplyCaps sets all the capabilities for the current process as specified in the config.
+func (c *containerCapabilities) ApplyCaps() error {
+ c.pid.Clear(allCapabilityTypes)
+ c.pid.Set(capability.BOUNDS, c.bounding...)
+ c.pid.Set(capability.PERMITTED, c.permitted...)
+ c.pid.Set(capability.INHERITABLE, c.inheritable...)
+ c.pid.Set(capability.EFFECTIVE, c.effective...)
+ c.pid.Set(capability.AMBIENT, c.ambient...)
+ return c.pid.Apply(allCapabilityTypes)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
new file mode 100644
index 000000000..25ff51589
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -0,0 +1,64 @@
+// +build linux
+
+package cgroups
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager interface {
+ // Applies cgroup configuration to the process with the specified pid
+ Apply(pid int) error
+
+ // Returns the PIDs inside the cgroup set
+ GetPids() ([]int, error)
+
+ // Returns the PIDs inside the cgroup set & all sub-cgroups
+ GetAllPids() ([]int, error)
+
+ // Returns statistics for the cgroup set
+ GetStats() (*Stats, error)
+
+	// Toggles the freezer cgroup according to the specified state
+ Freeze(state configs.FreezerState) error
+
+ // Destroys the cgroup set
+ Destroy() error
+
+	// The option funcs SystemdCgroups() and Cgroupfs() require the following attributes:
+	// 	Paths   map[string]string
+	// 	Cgroups *configs.Cgroup
+	// Paths maps each cgroup subsystem to the path at which it is mounted.
+	// Cgroups specifies specific cgroup settings for the various subsystems
+
+ // Returns cgroup paths to save in a state file and to be able to
+ // restore the object later.
+ GetPaths() map[string]string
+
+ // Sets the cgroup as configured.
+ Set(container *configs.Config) error
+}
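+
+// A minimal usage sketch (illustrative only; newManager stands in for a
+// concrete constructor such as the fs or systemd implementation):
+//
+//	var mgr Manager = newManager(cgroupConfig)
+//	if err := mgr.Apply(pid); err != nil { ... }  // create cgroups, add pid
+//	if err := mgr.Set(config); err != nil { ... } // write resource limits
+//	stats, err := mgr.GetStats()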
+
+type NotFoundError struct {
+ Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+ return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+ return &NotFoundError{
+ Subsystem: sub,
+ }
+}
+
+func IsNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*NotFoundError)
+ return ok
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
new file mode 100644
index 000000000..278d507e2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package cgroups
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
new file mode 100644
index 000000000..22d82acb4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
@@ -0,0 +1,360 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+var (
+ subsystems = subsystemSet{
+ &CpusetGroup{},
+ &DevicesGroup{},
+ &MemoryGroup{},
+ &CpuGroup{},
+ &CpuacctGroup{},
+ &PidsGroup{},
+ &BlkioGroup{},
+ &HugetlbGroup{},
+ &NetClsGroup{},
+ &NetPrioGroup{},
+ &PerfEventGroup{},
+ &FreezerGroup{},
+ &NameGroup{GroupName: "name=systemd", Join: true},
+ }
+ HugePageSizes, _ = cgroups.GetHugePageSize()
+)
+
+var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
+
+type subsystemSet []subsystem
+
+func (s subsystemSet) Get(name string) (subsystem, error) {
+ for _, ss := range s {
+ if ss.Name() == name {
+ return ss, nil
+ }
+ }
+ return nil, errSubsystemDoesNotExist
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+ // Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+ GetStats(path string, stats *cgroups.Stats) error
+ // Removes the cgroup represented by 'cgroupData'.
+ Remove(*cgroupData) error
+ // Creates and joins the cgroup represented by 'cgroupData'.
+ Apply(*cgroupData) error
+ // Set the cgroup represented by cgroup.
+ Set(path string, cgroup *configs.Cgroup) error
+}
+
+type Manager struct {
+ mu sync.Mutex
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+// The absolute path to the root of the cgroup hierarchies.
+var cgroupRootLock sync.Mutex
+var cgroupRoot string
+
+// Gets the cgroupRoot.
+func getCgroupRoot() (string, error) {
+ cgroupRootLock.Lock()
+ defer cgroupRootLock.Unlock()
+
+ if cgroupRoot != "" {
+ return cgroupRoot, nil
+ }
+
+ root, err := cgroups.FindCgroupMountpointDir()
+ if err != nil {
+ return "", err
+ }
+
+ if _, err := os.Stat(root); err != nil {
+ return "", err
+ }
+
+ cgroupRoot = root
+ return cgroupRoot, nil
+}
+
+type cgroupData struct {
+ root string
+ innerPath string
+ config *configs.Cgroup
+ pid int
+}
+
+func (m *Manager) Apply(pid int) (err error) {
+ if m.Cgroups == nil {
+ return nil
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ var c = m.Cgroups
+
+ d, err := getCgroupData(m.Cgroups, pid)
+ if err != nil {
+ return err
+ }
+
+ m.Paths = make(map[string]string)
+ if c.Paths != nil {
+ for name, path := range c.Paths {
+ _, err := d.path(name)
+ if err != nil {
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ m.Paths[name] = path
+ }
+ return cgroups.EnterPid(m.Paths, pid)
+ }
+
+ for _, sys := range subsystems {
+		// TODO: Apply should, ideally, be reentrant or be broken up into a separate
+		// create and join phase, so that the cgroup hierarchy for a container can be
+		// created first; join then consists of writing the process pids to cgroup.procs.
+ p, err := d.path(sys.Name())
+ if err != nil {
+ // The non-presence of the devices subsystem is
+ // considered fatal for security reasons.
+ if cgroups.IsNotFound(err) && sys.Name() != "devices" {
+ continue
+ }
+ return err
+ }
+ m.Paths[sys.Name()] = p
+
+ if err := sys.Apply(d); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *Manager) Destroy() error {
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if err := cgroups.RemovePaths(m.Paths); err != nil {
+ return err
+ }
+ m.Paths = make(map[string]string)
+ return nil
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ m.mu.Lock()
+ paths := m.Paths
+ m.mu.Unlock()
+ return paths
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ stats := cgroups.NewStats()
+ for name, path := range m.Paths {
+ sys, err := subsystems.Get(name)
+ if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
+ continue
+ }
+ if err := sys.GetStats(path, stats); err != nil {
+ return nil, err
+ }
+ }
+ return stats, nil
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+ // If Paths are set, then we are just joining cgroups paths
+ // and there is no need to set any values.
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+
+ paths := m.GetPaths()
+ for _, sys := range subsystems {
+ path := paths[sys.Name()]
+ if err := sys.Set(path, container.Cgroups); err != nil {
+ return err
+ }
+ }
+
+ if m.Paths["cpu"] != "" {
+ if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Freeze toggles the container's freezer cgroup depending on the state
+// provided
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ paths := m.GetPaths()
+ dir := paths["freezer"]
+ prevState := m.Cgroups.Resources.Freezer
+ m.Cgroups.Resources.Freezer = state
+ freezer, err := subsystems.Get("freezer")
+ if err != nil {
+ return err
+ }
+ err = freezer.Set(dir, m.Cgroups)
+ if err != nil {
+ m.Cgroups.Resources.Freezer = prevState
+ return err
+ }
+ return nil
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ paths := m.GetPaths()
+ return cgroups.GetPids(paths["devices"])
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ paths := m.GetPaths()
+ return cgroups.GetAllPids(paths["devices"])
+}
+
+func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
+ root, err := getCgroupRoot()
+ if err != nil {
+ return nil, err
+ }
+
+ if (c.Name != "" || c.Parent != "") && c.Path != "" {
+ return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used")
+ }
+
+ // XXX: Do not remove this code. Path safety is important! -- cyphar
+ cgPath := libcontainerUtils.CleanPath(c.Path)
+ cgParent := libcontainerUtils.CleanPath(c.Parent)
+ cgName := libcontainerUtils.CleanPath(c.Name)
+
+ innerPath := cgPath
+ if innerPath == "" {
+ innerPath = filepath.Join(cgParent, cgName)
+ }
+
+ return &cgroupData{
+ root: root,
+ innerPath: innerPath,
+ config: c,
+ pid: pid,
+ }, nil
+}
+
+func (raw *cgroupData) path(subsystem string) (string, error) {
+ mnt, err := cgroups.FindCgroupMountpoint(subsystem)
+	// If the subsystem is not mounted, there is no point in building the path.
+ if err != nil {
+ return "", err
+ }
+
+	// If the cgroup name/path is absolute, do not look it up relative to the cgroup of the init process.
+ if filepath.IsAbs(raw.innerPath) {
+ // Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
+ return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
+ }
+
+	// Use GetOwnCgroupPath instead of GetInitCgroupPath, because the creating
+	// process could be in a container sharing a pid namespace with the host, in
+	// which case /proc/1/cgroup could point to a whole other world of cgroups.
+ parentPath, err := cgroups.GetOwnCgroupPath(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(parentPath, raw.innerPath), nil
+}
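+
+// Worked example (hypothetical mount layout): with root "/sys/fs/cgroup",
+// the combined "cpu,cpuacct" mount, and a relative innerPath "parent/demo",
+// path("cpu") resolves against this process's own cgroup, e.g.
+// "/sys/fs/cgroup/cpu,cpuacct/user.slice/parent/demo". With an absolute
+// innerPath "/parent/demo" it becomes
+// "/sys/fs/cgroup/cpu,cpuacct/parent/demo", ignoring the caller's cgroup.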
+
+func (raw *cgroupData) join(subsystem string) (string, error) {
+ path, err := raw.path(subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return "", err
+ }
+ if err := cgroups.WriteCgroupProc(path, raw.pid); err != nil {
+ return "", err
+ }
+ return path, nil
+}
+
+func writeFile(dir, file, data string) error {
+	// Normally dir should not be empty. One case where it can be empty is when
+	// the cgroup subsystem is not mounted; we want to fail here rather than
+	// write to a bogus path.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s", file)
+ }
+ if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700); err != nil {
+ return fmt.Errorf("failed to write %v to %v: %v", data, file, err)
+ }
+ return nil
+}
+
+func readFile(dir, file string) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join(dir, file))
+ return string(data), err
+}
+
+func removePath(p string, err error) error {
+ if err != nil {
+ return err
+ }
+ if p != "" {
+ return os.RemoveAll(p)
+ }
+ return nil
+}
+
+func CheckCpushares(path string, c uint64) error {
+ var cpuShares uint64
+
+ if c == 0 {
+ return nil
+ }
+
+ fd, err := os.Open(filepath.Join(path, "cpu.shares"))
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+
+ _, err = fmt.Fscanf(fd, "%d", &cpuShares)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ if c > cpuShares {
+ return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
+ } else if c < cpuShares {
+ return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
+ }
+
+ return nil
+}
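+
+// Worked example (assuming standard CFS group-scheduling limits, which clamp
+// cpu.shares to [2, 262144]): requesting c = 1 reads back as 2, so
+// CheckCpushares returns "The minimum allowed cpu-shares is 2"; requesting a
+// value above 262144 reads back as 262144 and reports the maximum instead.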
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
new file mode 100644
index 000000000..a142cb991
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
@@ -0,0 +1,237 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type BlkioGroup struct {
+}
+
+func (s *BlkioGroup) Name() string {
+ return "blkio"
+}
+
+func (s *BlkioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("blkio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.BlkioWeight != 0 {
+ if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
+ return err
+ }
+ }
+
+ if cgroup.Resources.BlkioLeafWeight != 0 {
+ if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil {
+ return err
+ }
+ }
+ for _, wd := range cgroup.Resources.BlkioWeightDevice {
+ if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
+ return err
+ }
+ if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
+ if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
+ if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *BlkioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("blkio"))
+}
+
+/*
+examples:
+
+ blkio.sectors
+ 8:0 6792
+
+ blkio.io_service_bytes
+ 8:0 Read 1282048
+ 8:0 Write 2195456
+ 8:0 Sync 2195456
+ 8:0 Async 1282048
+ 8:0 Total 3477504
+ Total 3477504
+
+ blkio.io_serviced
+ 8:0 Read 124
+ 8:0 Write 104
+ 8:0 Sync 104
+ 8:0 Async 124
+ 8:0 Total 228
+ Total 228
+
+ blkio.io_queued
+ 8:0 Read 0
+ 8:0 Write 0
+ 8:0 Sync 0
+ 8:0 Async 0
+ 8:0 Total 0
+ Total 0
+*/
+
+func splitBlkioStatLine(r rune) bool {
+ return r == ' ' || r == ':'
+}
+
+func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
+ var blkioStats []cgroups.BlkioStatEntry
+ f, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return blkioStats, nil
+ }
+ return nil, err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ // format: dev type amount
+ fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
+ if len(fields) < 3 {
+ if len(fields) == 2 && fields[0] == "Total" {
+ // skip total line
+ continue
+ } else {
+ return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
+ }
+ }
+
+ v, err := strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ major := v
+
+ v, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ minor := v
+
+ op := ""
+ valueField := 2
+ if len(fields) == 4 {
+ op = fields[2]
+ valueField = 3
+ }
+ v, err = strconv.ParseUint(fields[valueField], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
+ }
+
+ return blkioStats, nil
+}
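+
+// Worked example: the io_service_bytes line "8:0 Read 1282048" splits (on
+// spaces and ':') into ["8", "0", "Read", "1282048"] and parses to
+// cgroups.BlkioStatEntry{Major: 8, Minor: 0, Op: "Read", Value: 1282048}.
+// A three-field line such as "8:0 6792" (blkio.sectors) leaves Op empty, and
+// the standalone two-field "Total ..." summary lines are skipped.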
+
+func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
+	// Try to read CFQ stats first; they are available on all CFQ-enabled kernels.
+ if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
+ return getCFQStats(path, stats)
+ }
+ return getStats(path, stats) // Use generic stats as fallback
+}
+
+func getCFQStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.SectorsRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoQueuedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoWaitTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoMergedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoTimeRecursive = blkioStats
+
+ return nil
+}
+
+func getStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
new file mode 100644
index 000000000..b712bd0b1
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
@@ -0,0 +1,125 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type CpuGroup struct {
+}
+
+func (s *CpuGroup) Name() string {
+ return "cpu"
+}
+
+func (s *CpuGroup) Apply(d *cgroupData) error {
+ // We always want to join the cpu group, to allow fair cpu scheduling
+ // on a container basis
+ path, err := d.path("cpu")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return s.ApplyDir(path, d.config, d.pid)
+}
+
+func (s *CpuGroup) ApplyDir(path string, cgroup *configs.Cgroup, pid int) error {
+ // This might happen if we have no cpu cgroup mounted.
+ // Just do nothing and don't fail.
+ if path == "" {
+ return nil
+ }
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+	// We should set the real-time group scheduling settings before moving
+	// the process in, because if the process is already in SCHED_RR mode
+	// and no RT bandwidth is set, adding it will fail.
+ if err := s.SetRtSched(path, cgroup); err != nil {
+ return err
+ }
+	// Because we are not using d.join, we need to place the pid into the
+	// procs file ourselves, unlike with the other subsystems.
+ if err := cgroups.WriteCgroupProc(path, pid); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpuGroup) SetRtSched(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpuRtPeriod != 0 {
+ if err := writeFile(path, "cpu.rt_period_us", strconv.FormatUint(cgroup.Resources.CpuRtPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuRtRuntime != 0 {
+ if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpuShares != 0 {
+ if err := writeFile(path, "cpu.shares", strconv.FormatUint(cgroup.Resources.CpuShares, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuPeriod != 0 {
+ if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatUint(cgroup.Resources.CpuPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuQuota != 0 {
+ if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil {
+ return err
+ }
+ }
+ if err := s.SetRtSched(path, cgroup); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpuGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpu"))
+}
+
+func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
+ f, err := os.Open(filepath.Join(path, "cpu.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return err
+ }
+ switch t {
+ case "nr_periods":
+ stats.CpuStats.ThrottlingData.Periods = v
+
+ case "nr_throttled":
+ stats.CpuStats.ThrottlingData.ThrottledPeriods = v
+
+ case "throttled_time":
+ stats.CpuStats.ThrottlingData.ThrottledTime = v
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
new file mode 100644
index 000000000..53afbaddf
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
@@ -0,0 +1,121 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+const (
+ cgroupCpuacctStat = "cpuacct.stat"
+ nanosecondsInSecond = 1000000000
+)
+
+var clockTicks = uint64(system.GetClockTicks())
+
+type CpuacctGroup struct {
+}
+
+func (s *CpuacctGroup) Name() string {
+ return "cpuacct"
+}
+
+func (s *CpuacctGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *CpuacctGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuacct"))
+}
+
+func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
+ userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
+ if err != nil {
+ return err
+ }
+
+ totalUsage, err := getCgroupParamUint(path, "cpuacct.usage")
+ if err != nil {
+ return err
+ }
+
+ percpuUsage, err := getPercpuUsage(path)
+ if err != nil {
+ return err
+ }
+
+ stats.CpuStats.CpuUsage.TotalUsage = totalUsage
+ stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
+ stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
+ stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
+ return nil
+}
+
+// Returns user and kernel usage breakdown in nanoseconds.
+func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
+ userModeUsage := uint64(0)
+ kernelModeUsage := uint64(0)
+ const (
+ userField = "user"
+ systemField = "system"
+ )
+
+ // Expected format:
+ // user <usage in ticks>
+ // system <usage in ticks>
+ data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat))
+ if err != nil {
+ return 0, 0, err
+ }
+ fields := strings.Fields(string(data))
+ if len(fields) != 4 {
+ return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat))
+ }
+ if fields[0] != userField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField)
+ }
+ if fields[2] != systemField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField)
+ }
+ if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
+ return 0, 0, err
+ }
+ if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
+ return 0, 0, err
+ }
+
+ return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
+}
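+
+// Worked example (assuming the common USER_HZ of 100, i.e. clockTicks == 100):
+// a cpuacct.stat containing "user 250\nsystem 100" converts to
+// (250 * 1e9) / 100 = 2.5e9 ns of user time and (100 * 1e9) / 100 = 1e9 ns
+// of kernel time.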
+
+func getPercpuUsage(path string) ([]uint64, error) {
+ percpuUsage := []uint64{}
+ data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
+ if err != nil {
+ return percpuUsage, err
+ }
+ for _, value := range strings.Fields(string(data)) {
+ value, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
+ }
+ percpuUsage = append(percpuUsage, value)
+ }
+ return percpuUsage, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
new file mode 100644
index 000000000..20c9eafac
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
@@ -0,0 +1,163 @@
+// +build linux
+
+package fs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+type CpusetGroup struct {
+}
+
+func (s *CpusetGroup) Name() string {
+ return "cpuset"
+}
+
+func (s *CpusetGroup) Apply(d *cgroupData) error {
+ dir, err := d.path("cpuset")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return s.ApplyDir(dir, d.config, d.pid)
+}
+
+func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpusetCpus != "" {
+ if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpusetMems != "" {
+ if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuset"))
+}
+
+func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
+
+func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
+ // This might happen if we have no cpuset cgroup mounted.
+ // Just do nothing and don't fail.
+ if dir == "" {
+ return nil
+ }
+ mountInfo, err := ioutil.ReadFile("/proc/self/mountinfo")
+ if err != nil {
+ return err
+ }
+ root := filepath.Dir(cgroups.GetClosestMountpointAncestor(dir, string(mountInfo)))
+	// 'ensureParent' starts with the parent because we don't want to
+	// explicitly inherit from the parent; that could conflict with
+	// 'cpuset.cpu_exclusive'.
+ if err := s.ensureParent(filepath.Dir(dir), root); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+	// We didn't inherit cpuset configs from the parent, but we have
+	// to ensure cpuset configs are set before moving the task into the
+	// cgroup.
+	// The logic is: if the user specified cpuset configs, use those;
+	// otherwise, inherit from the parent. This makes cpuset configs
+	// work correctly with 'cpuset.cpu_exclusive' and keeps backward
+	// compatibility.
+ if err := s.ensureCpusAndMems(dir, cgroup); err != nil {
+ return err
+ }
+
+	// Because we are not using d.join, we need to place the pid into the
+	// procs file ourselves, unlike with the other subsystems.
+ if err := cgroups.WriteCgroupProc(dir, pid); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
+ if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
+ return
+ }
+ if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
+ return
+ }
+ return cpus, mems, nil
+}
+
+// ensureParent makes sure that the parent directory of current is created
+// and populated with the proper cpus and mems files copied from
+// its parent.
+func (s *CpusetGroup) ensureParent(current, root string) error {
+ parent := filepath.Dir(current)
+ if libcontainerUtils.CleanPath(parent) == root {
+ return nil
+ }
+ // Avoid infinite recursion.
+ if parent == current {
+ return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
+ }
+ if err := s.ensureParent(parent, root); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(current, 0755); err != nil {
+ return err
+ }
+ return s.copyIfNeeded(current, parent)
+}
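+
+// Illustrative walk (hypothetical paths): for dir = "<root>/a/b/c", ApplyDir
+// calls ensureParent("<root>/a/b", "<root>"). The recursion bottoms out at
+// "<root>", then creates "<root>/a" and "<root>/a/b" on the way back,
+// copying cpuset.cpus and cpuset.mems from each parent wherever the child's
+// files are empty; "c" itself is created and populated by the caller.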
+
+// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
+// directory to the current directory if the current file's contents are empty
+func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
+ var (
+ err error
+ currentCpus, currentMems []byte
+ parentCpus, parentMems []byte
+ )
+
+ if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
+ return err
+ }
+ if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
+ return err
+ }
+
+ if s.isEmpty(currentCpus) {
+ if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
+ return err
+ }
+ }
+ if s.isEmpty(currentMems) {
+ if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) isEmpty(b []byte) bool {
+ return len(bytes.Trim(b, "\n")) == 0
+}
+
+func (s *CpusetGroup) ensureCpusAndMems(path string, cgroup *configs.Cgroup) error {
+ if err := s.Set(path, cgroup); err != nil {
+ return err
+ }
+ return s.copyIfNeeded(path, filepath.Dir(path))
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
new file mode 100644
index 000000000..0ac5b4ed7
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
@@ -0,0 +1,80 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type DevicesGroup struct {
+}
+
+func (s *DevicesGroup) Name() string {
+ return "devices"
+}
+
+func (s *DevicesGroup) Apply(d *cgroupData) error {
+ _, err := d.join("devices")
+ if err != nil {
+		// We return the error even if it is a `not found` error: the devices
+		// cgroup is a hard requirement for the container's security.
+ return err
+ }
+ return nil
+}
+
+func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if system.RunningInUserNS() {
+ return nil
+ }
+
+ devices := cgroup.Resources.Devices
+ if len(devices) > 0 {
+ for _, dev := range devices {
+ file := "devices.deny"
+ if dev.Allow {
+ file = "devices.allow"
+ }
+ if err := writeFile(path, file, dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if cgroup.Resources.AllowAllDevices != nil {
+		if !*cgroup.Resources.AllowAllDevices {
+ if err := writeFile(path, "devices.deny", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.Resources.AllowedDevices {
+ if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ if err := writeFile(path, "devices.allow", "a"); err != nil {
+ return err
+ }
+ }
+
+ for _, dev := range cgroup.Resources.DeniedDevices {
+ if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *DevicesGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("devices"))
+}
+
+func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
new file mode 100644
index 000000000..e70dfe3b9
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
@@ -0,0 +1,61 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type FreezerGroup struct {
+}
+
+func (s *FreezerGroup) Name() string {
+ return "freezer"
+}
+
+func (s *FreezerGroup) Apply(d *cgroupData) error {
+ _, err := d.join("freezer")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
+ switch cgroup.Resources.Freezer {
+ case configs.Frozen, configs.Thawed:
+ if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil {
+ return err
+ }
+
+ for {
+ state, err := readFile(path, "freezer.state")
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) {
+ break
+ }
+ time.Sleep(1 * time.Millisecond)
+ }
+ case configs.Undefined:
+ return nil
+ default:
+ return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
+ }
+
+ return nil
+}
+
+func (s *FreezerGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("freezer"))
+}
+
+func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
new file mode 100644
index 000000000..3ef9e0315
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package fs
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
new file mode 100644
index 000000000..2f9727719
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
@@ -0,0 +1,71 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type HugetlbGroup struct {
+}
+
+func (s *HugetlbGroup) Name() string {
+ return "hugetlb"
+}
+
+func (s *HugetlbGroup) Apply(d *cgroupData) error {
+ _, err := d.join("hugetlb")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, hugetlb := range cgroup.Resources.HugetlbLimit {
+ if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *HugetlbGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("hugetlb"))
+}
+
+func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
+ hugetlbStats := cgroups.HugetlbStats{}
+ for _, pageSize := range HugePageSizes {
+ usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ hugetlbStats.Usage = value
+
+ maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".")
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ hugetlbStats.MaxUsage = value
+
+ failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".")
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ hugetlbStats.Failcnt = value
+
+ stats.HugetlbStats[pageSize] = hugetlbStats
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
new file mode 100644
index 000000000..ad395a5d6
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
@@ -0,0 +1,313 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall" // only for Errno
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ cgroupKernelMemoryLimit = "memory.kmem.limit_in_bytes"
+ cgroupMemorySwapLimit = "memory.memsw.limit_in_bytes"
+ cgroupMemoryLimit = "memory.limit_in_bytes"
+)
+
+type MemoryGroup struct {
+}
+
+func (s *MemoryGroup) Name() string {
+ return "memory"
+}
+
+func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
+ path, err := d.path("memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ } else if path == "" {
+ return nil
+ }
+ if memoryAssigned(d.config) {
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+			// Only enable kernel memory accounting when this cgroup
+			// is created by libcontainer; otherwise we might get an
+			// error when people use `cgroupsPath` to join an existing
+			// cgroup whose kernel memory is not initialized.
+ if err := EnableKernelMemoryAccounting(path); err != nil {
+ return err
+ }
+ }
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(path)
+ }
+ }()
+
+	// We need to join the memory cgroup after setting the memory limits,
+	// because kmem.limit_in_bytes can only be set when the cgroup is empty.
+ _, err = d.join("memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func EnableKernelMemoryAccounting(path string) error {
+	// Check if kernel memory is enabled.
+	// We have to limit the kernel memory here as it won't be accounted for at
+	// all until a limit is set on the cgroup, and a limit cannot be set once
+	// the cgroup has children or once there are already tasks in the cgroup.
+ for _, i := range []int64{1, -1} {
+ if err := setKernelMemory(path, i); err != nil {
+ return err
+ }
+ }
+ return nil
+}
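+
+// Note on the 1/-1 sequence above: writing any limit (here a single byte)
+// switches kernel memory accounting on for the cgroup, and writing -1 right
+// afterwards lifts the limit again, leaving accounting enabled but
+// unrestricted.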
+
+func setKernelMemory(path string, kernelMemoryLimit int64) error {
+ if path == "" {
+ return fmt.Errorf("no such directory for %s", cgroupKernelMemoryLimit)
+ }
+ if !cgroups.PathExists(filepath.Join(path, cgroupKernelMemoryLimit)) {
+ // kernel memory is not enabled on the system so we should do nothing
+ return nil
+ }
+ if err := ioutil.WriteFile(filepath.Join(path, cgroupKernelMemoryLimit), []byte(strconv.FormatInt(kernelMemoryLimit, 10)), 0700); err != nil {
+		// Check if the error number returned by the syscall is EBUSY.
+		// EBUSY is returned on attempts to write to the
+		// memory.kmem.limit_in_bytes file if the cgroup has children or
+		// once tasks have been attached to the cgroup.
+ if pathErr, ok := err.(*os.PathError); ok {
+ if errNo, ok := pathErr.Err.(syscall.Errno); ok {
+ if errNo == unix.EBUSY {
+ return fmt.Errorf("failed to set %s, because either tasks have already joined this cgroup or it has children", cgroupKernelMemoryLimit)
+ }
+ }
+ }
+ return fmt.Errorf("failed to write %v to %v: %v", kernelMemoryLimit, cgroupKernelMemoryLimit, err)
+ }
+ return nil
+}
+
+func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
+	// If the memory update is set to -1, we should also set swap to -1;
+	// it means unlimited memory.
+ if cgroup.Resources.Memory == -1 {
+ // Only set swap if it's enabled in kernel
+ if cgroups.PathExists(filepath.Join(path, cgroupMemorySwapLimit)) {
+ cgroup.Resources.MemorySwap = -1
+ }
+ }
+
+	// When memory and swap memory are both set, we need to handle the cases
+	// for updating a container.
+ if cgroup.Resources.Memory != 0 && cgroup.Resources.MemorySwap != 0 {
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+
+		// When updating the memory limit, we should adapt the write sequence
+		// for memory and swap memory, so it won't fail because the new value
+		// and the old value don't pass the kernel's validation.
+ if cgroup.Resources.MemorySwap == -1 || memoryUsage.Limit < uint64(cgroup.Resources.MemorySwap) {
+ if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ } else {
+ if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ } else {
+ if cgroup.Resources.Memory != 0 {
+ if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwap != 0 {
+ if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
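+
+// Worked example of the write ordering above (hypothetical limits): with a
+// current memory limit of 512M, an update to memory=2G and memsw=4G must
+// write memory.memsw.limit_in_bytes first, because setting memory=2G while
+// memsw is still lower would fail the kernel's memsw >= memory check; when
+// shrinking (e.g. to memory=256M, memsw=512M) the order is reversed.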
+
+func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if err := setMemoryAndSwap(path, cgroup); err != nil {
+ return err
+ }
+
+ if cgroup.Resources.KernelMemory != 0 {
+ if err := setKernelMemory(path, cgroup.Resources.KernelMemory); err != nil {
+ return err
+ }
+ }
+
+ if cgroup.Resources.MemoryReservation != 0 {
+ if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil {
+ return err
+ }
+ }
+
+ if cgroup.Resources.KernelMemoryTCP != 0 {
+ if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.OomKillDisable {
+ if err := writeFile(path, "memory.oom_control", "1"); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 {
+ return nil
+ } else if *cgroup.Resources.MemorySwappiness <= 100 {
+ if err := writeFile(path, "memory.swappiness", strconv.FormatUint(*cgroup.Resources.MemorySwappiness, 10)); err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", *cgroup.Resources.MemorySwappiness)
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("memory"))
+}
+
+func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
+ // Set stats from memory.stat.
+ statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer statsFile.Close()
+
+ sc := bufio.NewScanner(statsFile)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
+ }
+ stats.MemoryStats.Stats[t] = v
+ }
+ stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
+
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.Usage = memoryUsage
+ swapUsage, err := getMemoryData(path, "memsw")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.SwapUsage = swapUsage
+ kernelUsage, err := getMemoryData(path, "kmem")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelUsage = kernelUsage
+ kernelTCPUsage, err := getMemoryData(path, "kmem.tcp")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelTCPUsage = kernelTCPUsage
+
+ useHierarchy := strings.Join([]string{"memory", "use_hierarchy"}, ".")
+ value, err := getCgroupParamUint(path, useHierarchy)
+ if err != nil {
+ return err
+ }
+ if value == 1 {
+ stats.MemoryStats.UseHierarchy = true
+ }
+ return nil
+}
+
+func memoryAssigned(cgroup *configs.Cgroup) bool {
+ return cgroup.Resources.Memory != 0 ||
+ cgroup.Resources.MemoryReservation != 0 ||
+ cgroup.Resources.MemorySwap > 0 ||
+ cgroup.Resources.KernelMemory > 0 ||
+ cgroup.Resources.KernelMemoryTCP > 0 ||
+ cgroup.Resources.OomKillDisable ||
+ (cgroup.Resources.MemorySwappiness != nil && int64(*cgroup.Resources.MemorySwappiness) != -1)
+}
+
+func getMemoryData(path, name string) (cgroups.MemoryData, error) {
+ memoryData := cgroups.MemoryData{}
+
+ moduleName := "memory"
+ if name != "" {
+ moduleName = strings.Join([]string{"memory", name}, ".")
+ }
+ usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
+ maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
+ failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
+ limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".")
+
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ memoryData.Usage = value
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ memoryData.MaxUsage = value
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ memoryData.Failcnt = value
+ value, err = getCgroupParamUint(path, limit)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
+ }
+ memoryData.Limit = value
+
+ return memoryData, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
new file mode 100644
index 000000000..d8cf1d87c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
@@ -0,0 +1,40 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NameGroup struct {
+ GroupName string
+ Join bool
+}
+
+func (s *NameGroup) Name() string {
+ return s.GroupName
+}
+
+func (s *NameGroup) Apply(d *cgroupData) error {
+ if s.Join {
+ // ignore errors if the named cgroup does not exist
+ d.join(s.GroupName)
+ }
+ return nil
+}
+
+func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *NameGroup) Remove(d *cgroupData) error {
+ if s.Join {
+ removePath(d.path(s.GroupName))
+ }
+ return nil
+}
+
+func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
new file mode 100644
index 000000000..8e74b645e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
@@ -0,0 +1,43 @@
+// +build linux
+
+package fs
+
+import (
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetClsGroup struct {
+}
+
+func (s *NetClsGroup) Name() string {
+ return "net_cls"
+}
+
+func (s *NetClsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_cls")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.NetClsClassid != 0 {
+ if err := writeFile(path, "net_cls.classid", strconv.FormatUint(uint64(cgroup.Resources.NetClsClassid), 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetClsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_cls"))
+}
+
+func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
new file mode 100644
index 000000000..d0ab2af89
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
@@ -0,0 +1,41 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetPrioGroup struct {
+}
+
+func (s *NetPrioGroup) Name() string {
+ return "net_prio"
+}
+
+func (s *NetPrioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_prio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, prioMap := range cgroup.Resources.NetPrioIfpriomap {
+ if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetPrioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_prio"))
+}
+
+func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
new file mode 100644
index 000000000..5693676d3
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
@@ -0,0 +1,35 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PerfEventGroup struct {
+}
+
+func (s *PerfEventGroup) Name() string {
+ return "perf_event"
+}
+
+func (s *PerfEventGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *PerfEventGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("perf_event"))
+}
+
+func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
new file mode 100644
index 000000000..f1e372055
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
@@ -0,0 +1,73 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PidsGroup struct {
+}
+
+func (s *PidsGroup) Name() string {
+ return "pids"
+}
+
+func (s *PidsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("pids")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.PidsLimit != 0 {
+		// "max" (no limit) is the fallback when PidsLimit is negative.
+ limit := "max"
+
+ if cgroup.Resources.PidsLimit > 0 {
+ limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
+ }
+
+ if err := writeFile(path, "pids.max", limit); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *PidsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("pids"))
+}
+
+func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ current, err := getCgroupParamUint(path, "pids.current")
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.current - %s", err)
+ }
+
+ maxString, err := getCgroupParamString(path, "pids.max")
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.max - %s", err)
+ }
+
+ // Default if pids.max == "max" is 0 -- which represents "no limit".
+ var max uint64
+ if maxString != "max" {
+ max, err = parseUint(maxString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max"))
+ }
+ }
+
+ stats.PidsStats.Current = current
+ stats.PidsStats.Limit = max
+ return nil
+}
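+
+// Worked example: pids.current = "4" with pids.max = "max" yields
+// PidsStats{Current: 4, Limit: 0}, where Limit == 0 encodes "no limit";
+// with pids.max = "100" it yields Limit: 100.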
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
new file mode 100644
index 000000000..5ff0a1615
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
@@ -0,0 +1,78 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+var (
+ ErrNotValidFormat = errors.New("line is not a valid key value format")
+)
+
+// Saturates negative values at zero and returns a uint64.
+// Due to kernel bugs, some of the memory cgroup stats can be negative.
+func parseUint(s string, base, bitSize int) (uint64, error) {
+ value, err := strconv.ParseUint(s, base, bitSize)
+ if err != nil {
+ intValue, intErr := strconv.ParseInt(s, base, bitSize)
+		// 1. Handle negative values that still parse as an int64, and
+		// 2. handle negative values smaller than MinInt64, which ParseInt
+		//    reports as an out-of-range error with the value clamped to MinInt64.
+ if intErr == nil && intValue < 0 {
+ return 0, nil
+ } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
+ return 0, nil
+ }
+
+ return value, err
+ }
+
+ return value, nil
+}
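+
+// Worked examples of the saturation behavior:
+//
+//	parseUint("123", 10, 64)                   // (123, nil)
+//	parseUint("-1", 10, 64)                    // (0, nil): negative, saturated
+//	parseUint("-92233720368547758080", 10, 64) // (0, nil): below MinInt64, saturated
+//	parseUint("abc", 10, 64)                   // (0, err): original ParseUint error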
+
+// Parses a cgroup param and returns its name and value,
+// e.g. "io_service_bytes 1234" returns as ("io_service_bytes", 1234).
+func getCgroupParamKeyValue(t string) (string, uint64, error) {
+ parts := strings.Fields(t)
+ switch len(parts) {
+ case 2:
+ value, err := parseUint(parts[1], 10, 64)
+ if err != nil {
+ return "", 0, fmt.Errorf("unable to convert param value (%q) to uint64: %v", parts[1], err)
+ }
+
+ return parts[0], value, nil
+ default:
+ return "", 0, ErrNotValidFormat
+ }
+}
+
+// Gets a single uint64 value from the specified cgroup file.
+func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) {
+ fileName := filepath.Join(cgroupPath, cgroupFile)
+ contents, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return 0, err
+ }
+
+ res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName)
+ }
+ return res, nil
+}
+
+// Gets a string value from the specified cgroup file
+func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) {
+ contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(contents)), nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go
new file mode 100644
index 000000000..b1efbfd99
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go
@@ -0,0 +1,128 @@
+// +build linux
+
+package rootless
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/configs/validate"
+)
+
+// TODO: This is copied from libcontainer/cgroups/fs, which duplicates this code
+// needlessly. We should probably export this list.
+
+var subsystems = []subsystem{
+ &fs.CpusetGroup{},
+ &fs.DevicesGroup{},
+ &fs.MemoryGroup{},
+ &fs.CpuGroup{},
+ &fs.CpuacctGroup{},
+ &fs.PidsGroup{},
+ &fs.BlkioGroup{},
+ &fs.HugetlbGroup{},
+ &fs.NetClsGroup{},
+ &fs.NetPrioGroup{},
+ &fs.PerfEventGroup{},
+ &fs.FreezerGroup{},
+ &fs.NameGroup{GroupName: "name=systemd"},
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+
+ // Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+ GetStats(path string, stats *cgroups.Stats) error
+}
+
+// The noop cgroup manager is used for rootless containers, because we currently
+// cannot manage cgroups if we are in a rootless setup. This manager is chosen
+// by the factory if we are in rootless mode. We error out if any cgroup options
+// are set in the config -- this may change in the future with upcoming kernel
+// features like the cgroup namespace.
+
+type Manager struct {
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+func (m *Manager) Apply(pid int) error {
+ // If there are no cgroup settings, there's nothing to do.
+ if m.Cgroups == nil {
+ return nil
+ }
+
+ // We can't set paths.
+ // TODO(cyphar): Implement the case where the runner of a rootless container
+ // owns their own cgroup, which would allow us to set up a
+ // cgroup for each path.
+ if m.Cgroups.Paths != nil {
+ return fmt.Errorf("cannot change cgroup path in rootless container")
+ }
+
+ // We load the paths into the manager.
+ paths := make(map[string]string)
+ for _, sys := range subsystems {
+ name := sys.Name()
+
+ path, err := cgroups.GetOwnCgroupPath(name)
+ if err != nil {
+ // Ignore paths we couldn't resolve.
+ continue
+ }
+
+ paths[name] = path
+ }
+
+ m.Paths = paths
+ return nil
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ return m.Paths
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+ // We have to re-do the validation here, since someone might decide to
+ // update a rootless container.
+ return validate.New().Validate(container)
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ dir, err := cgroups.GetOwnCgroupPath("devices")
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetPids(dir)
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ dir, err := cgroups.GetOwnCgroupPath("devices")
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetAllPids(dir)
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ // TODO(cyphar): We can make this work if we figure out a way to allow usage
+ // of cgroups with a rootless container. While this doesn't
+ // actually require write access to a cgroup directory, the
+ // statistics are not useful if they can be affected by
+ // non-container processes.
+ return nil, fmt.Errorf("cannot get cgroup stats in rootless container")
+}
+
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ // TODO(cyphar): We can make this work if we figure out a way to allow usage
+ // of cgroups with a rootless container.
+ return fmt.Errorf("cannot use freezer cgroup in rootless container")
+}
+
+func (m *Manager) Destroy() error {
+ // We don't have to do anything here because we didn't do any setup.
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
new file mode 100644
index 000000000..8eeedc55b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
@@ -0,0 +1,108 @@
+// +build linux
+
+package cgroups
+
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods,omitempty"`
+ // Number of periods when the container hit its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+}
+
+// CpuUsage denotes the usage of a CPU.
+// All CPU stats are aggregate since container inception.
+type CpuUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds.
+ TotalUsage uint64 `json:"total_usage,omitempty"`
+ // Total CPU time consumed per core.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+ // Time spent by tasks of the cgroup in kernel mode.
+ // Units: nanoseconds.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+ // Time spent by tasks of the cgroup in user mode.
+ // Units: nanoseconds.
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+ CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryData struct {
+ Usage uint64 `json:"usage,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ Failcnt uint64 `json:"failcnt"`
+ Limit uint64 `json:"limit"`
+}
+
+type MemoryStats struct {
+ // memory used for cache
+ Cache uint64 `json:"cache,omitempty"`
+ // usage of memory
+ Usage MemoryData `json:"usage,omitempty"`
+ // usage of memory + swap
+ SwapUsage MemoryData `json:"swap_usage,omitempty"`
+ // usage of kernel memory
+ KernelUsage MemoryData `json:"kernel_usage,omitempty"`
+ // usage of kernel TCP memory
+ KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
+ // if true, memory usage is accounted for throughout a hierarchy of cgroups.
+ UseHierarchy bool `json:"use_hierarchy"`
+
+ Stats map[string]uint64 `json:"stats,omitempty"`
+}
+
+type PidsStats struct {
+ // number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // active pids hard limit
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+type BlkioStatEntry struct {
+ Major uint64 `json:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty"`
+ Op string `json:"op,omitempty"`
+ Value uint64 `json:"value,omitempty"`
+}
+
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"`
+}
+
+type HugetlbStats struct {
+ // current res_counter usage for hugetlb
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+	// number of times hugetlb allocation failed.
+ Failcnt uint64 `json:"failcnt"`
+}
+
+type Stats struct {
+ CpuStats CpuStats `json:"cpu_stats,omitempty"`
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+ // the map is in the format "size of hugepage: stats of the hugepage"
+ HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
+}
+
+func NewStats() *Stats {
+ memoryStats := MemoryStats{Stats: make(map[string]uint64)}
+ hugetlbStats := make(map[string]HugetlbStats)
+ return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
+}
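+
+// Minimal usage sketch (assumed caller): allocate once, then let each
+// subsystem's GetStats(path, stats) fill in the fields it owns.
+//
+//	stats := NewStats()
+//	// e.g. the memory subsystem populates stats.MemoryStats,
+//	// the pids subsystem populates stats.PidsStats, and so on.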
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go
new file mode 100644
index 000000000..7de9ae605
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go
@@ -0,0 +1,55 @@
+// +build !linux
+
+package systemd
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager struct {
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+func UseSystemd() bool {
+ return false
+}
+
+func (m *Manager) Apply(pid int) error {
+ return fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ return nil, fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ return nil, fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) Destroy() error {
+ return fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ return nil
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ return nil, fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+	return fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ return fmt.Errorf("Systemd not supported")
+}
+
+func Freeze(c *configs.Cgroup, state configs.FreezerState) error {
+ return fmt.Errorf("Systemd not supported")
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go
new file mode 100644
index 000000000..b010b4b32
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go
@@ -0,0 +1,553 @@
+// +build linux
+
+package systemd
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ systemdDbus "github.com/coreos/go-systemd/dbus"
+ systemdUtil "github.com/coreos/go-systemd/util"
+ "github.com/godbus/dbus"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager struct {
+ mu sync.Mutex
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+ // Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+ GetStats(path string, stats *cgroups.Stats) error
+ // Set the cgroup represented by cgroup.
+ Set(path string, cgroup *configs.Cgroup) error
+}
+
+var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
+
+type subsystemSet []subsystem
+
+func (s subsystemSet) Get(name string) (subsystem, error) {
+ for _, ss := range s {
+ if ss.Name() == name {
+ return ss, nil
+ }
+ }
+ return nil, errSubsystemDoesNotExist
+}
+
+var subsystems = subsystemSet{
+ &fs.CpusetGroup{},
+ &fs.DevicesGroup{},
+ &fs.MemoryGroup{},
+ &fs.CpuGroup{},
+ &fs.CpuacctGroup{},
+ &fs.PidsGroup{},
+ &fs.BlkioGroup{},
+ &fs.HugetlbGroup{},
+ &fs.PerfEventGroup{},
+ &fs.FreezerGroup{},
+ &fs.NetPrioGroup{},
+ &fs.NetClsGroup{},
+ &fs.NameGroup{GroupName: "name=systemd"},
+}
+
+const (
+ testScopeWait = 4
+ testSliceWait = 4
+)
+
+var (
+ connLock sync.Mutex
+ theConn *systemdDbus.Conn
+ hasStartTransientUnit bool
+ hasStartTransientSliceUnit bool
+ hasTransientDefaultDependencies bool
+ hasDelegate bool
+)
+
+func newProp(name string, units interface{}) systemdDbus.Property {
+ return systemdDbus.Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+func UseSystemd() bool {
+ if !systemdUtil.IsRunningSystemd() {
+ return false
+ }
+
+ connLock.Lock()
+ defer connLock.Unlock()
+
+ if theConn == nil {
+ var err error
+ theConn, err = systemdDbus.New()
+ if err != nil {
+ return false
+ }
+
+ // Assume we have StartTransientUnit
+ hasStartTransientUnit = true
+
+ // But if we get an UnknownMethod error, we don't.
+ if _, err := theConn.StartTransientUnit("test.scope", "invalid", nil, nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
+ hasStartTransientUnit = false
+ return hasStartTransientUnit
+ }
+ }
+ }
+
+ // Ensure the scope name we use doesn't exist. Use the Pid to
+ // avoid collisions between multiple libcontainer users on a
+ // single host.
+ scope := fmt.Sprintf("libcontainer-%d-systemd-test-default-dependencies.scope", os.Getpid())
+ testScopeExists := true
+ for i := 0; i <= testScopeWait; i++ {
+ if _, err := theConn.StopUnit(scope, "replace", nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
+ testScopeExists = false
+ break
+ }
+ }
+ }
+ time.Sleep(time.Millisecond)
+ }
+
+ // Bail out if we can't kill this scope without testing for DefaultDependencies
+ if testScopeExists {
+ return hasStartTransientUnit
+ }
+
+ // Assume StartTransientUnit on a scope allows DefaultDependencies
+ hasTransientDefaultDependencies = true
+ ddf := newProp("DefaultDependencies", false)
+ if _, err := theConn.StartTransientUnit(scope, "replace", []systemdDbus.Property{ddf}, nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") {
+ hasTransientDefaultDependencies = false
+ }
+ }
+ }
+
+ // Not critical because of the stop unit logic above.
+ theConn.StopUnit(scope, "replace", nil)
+
+ // Assume StartTransientUnit on a scope allows Delegate
+ hasDelegate = true
+ dl := newProp("Delegate", true)
+ if _, err := theConn.StartTransientUnit(scope, "replace", []systemdDbus.Property{dl}, nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") {
+ hasDelegate = false
+ }
+ }
+ }
+
+ // Assume we have the ability to start a transient unit as a slice
+ // This was broken until systemd v229, but has been back-ported on RHEL environments >= 219
+ // For details, see: https://bugzilla.redhat.com/show_bug.cgi?id=1370299
+ hasStartTransientSliceUnit = true
+
+ // To ensure simple clean-up, we create a slice off the root with no hierarchy
+ slice := fmt.Sprintf("libcontainer_%d_systemd_test_default.slice", os.Getpid())
+ if _, err := theConn.StartTransientUnit(slice, "replace", nil, nil); err != nil {
+ if _, ok := err.(dbus.Error); ok {
+ hasStartTransientSliceUnit = false
+ }
+ }
+
+ for i := 0; i <= testSliceWait; i++ {
+ if _, err := theConn.StopUnit(slice, "replace", nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
+ hasStartTransientSliceUnit = false
+ break
+ }
+ }
+ } else {
+ break
+ }
+ time.Sleep(time.Millisecond)
+ }
+
+ // Not critical because of the stop unit logic above.
+ theConn.StopUnit(scope, "replace", nil)
+ theConn.StopUnit(slice, "replace", nil)
+ }
+ return hasStartTransientUnit
+}
+
+func (m *Manager) Apply(pid int) error {
+ var (
+ c = m.Cgroups
+ unitName = getUnitName(c)
+ slice = "system.slice"
+ properties []systemdDbus.Property
+ )
+
+ if c.Paths != nil {
+ paths := make(map[string]string)
+ for name, path := range c.Paths {
+ _, err := getSubsystemPath(m.Cgroups, name)
+ if err != nil {
+ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[name] = path
+ }
+ m.Paths = paths
+ return cgroups.EnterPid(m.Paths, pid)
+ }
+
+ if c.Parent != "" {
+ slice = c.Parent
+ }
+
+ properties = append(properties, systemdDbus.PropDescription("libcontainer container "+c.Name))
+
+ // if we create a slice, the parent is defined via a Wants=
+ if strings.HasSuffix(unitName, ".slice") {
+ // This was broken until systemd v229, but has been back-ported on RHEL environments >= 219
+ if !hasStartTransientSliceUnit {
+ return fmt.Errorf("systemd version does not support ability to start a slice as transient unit")
+ }
+ properties = append(properties, systemdDbus.PropWants(slice))
+ } else {
+ // otherwise, we use Slice=
+ properties = append(properties, systemdDbus.PropSlice(slice))
+ }
+
+ // Only add the pid if it's valid; -1 is used with general slice creation.
+ if pid != -1 {
+ properties = append(properties, newProp("PIDs", []uint32{uint32(pid)}))
+ }
+
+ if hasDelegate {
+ // This is only supported on systemd versions 218 and above.
+ properties = append(properties, newProp("Delegate", true))
+ }
+
+ // Always enable accounting; this gets us the same behaviour as the fs implementation,
+ // and the kernel has some problems with joining the memory cgroup at a later time.
+ properties = append(properties,
+ newProp("MemoryAccounting", true),
+ newProp("CPUAccounting", true),
+ newProp("BlockIOAccounting", true))
+
+ if hasTransientDefaultDependencies {
+ properties = append(properties,
+ newProp("DefaultDependencies", false))
+ }
+
+ if c.Resources.Memory != 0 {
+ properties = append(properties,
+ newProp("MemoryLimit", c.Resources.Memory))
+ }
+
+ if c.Resources.CpuShares != 0 {
+ properties = append(properties,
+ newProp("CPUShares", c.Resources.CpuShares))
+ }
+
+ // cpu.cfs_quota_us and cpu.cfs_period_us are controlled by systemd.
+ if c.Resources.CpuQuota != 0 && c.Resources.CpuPeriod != 0 {
+ cpuQuotaPerSecUSec := uint64(c.Resources.CpuQuota*1000000) / c.Resources.CpuPeriod
+ properties = append(properties,
+ newProp("CPUQuotaPerSecUSec", cpuQuotaPerSecUSec))
+ }
+
+ if c.Resources.BlkioWeight != 0 {
+ properties = append(properties,
+ newProp("BlockIOWeight", uint64(c.Resources.BlkioWeight)))
+ }
+
+ // We have to set kernel memory here, as we can't change it once
+ // processes have been attached to the cgroup.
+ if c.Resources.KernelMemory != 0 {
+ if err := setKernelMemory(c); err != nil {
+ return err
+ }
+ }
+
+ if _, err := theConn.StartTransientUnit(unitName, "replace", properties, nil); err != nil && !isUnitExists(err) {
+ return err
+ }
+
+ if err := joinCgroups(c, pid); err != nil {
+ return err
+ }
+
+ paths := make(map[string]string)
+ for _, s := range subsystems {
+ subsystemPath, err := getSubsystemPath(m.Cgroups, s.Name())
+ if err != nil {
+ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[s.Name()] = subsystemPath
+ }
+ m.Paths = paths
+ return nil
+}
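A quick worked illustration of the CPUQuotaPerSecUSec conversion in Apply above, using hypothetical values; systemd expresses the quota as microseconds of CPU time per wall-clock second, while the cfs interface uses a quota per scheduling period:

```go
package main

import "fmt"

func main() {
	// Hypothetical values: 50ms of CPU time per 100ms period, i.e. half a CPU.
	cpuQuota := int64(50000)    // cpu.cfs_quota_us
	cpuPeriod := uint64(100000) // cpu.cfs_period_us
	cpuQuotaPerSecUSec := uint64(cpuQuota*1000000) / cpuPeriod
	fmt.Println(cpuQuotaPerSecUSec) // 500000, i.e. 0.5 CPU-seconds per second
}
```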
+
+func (m *Manager) Destroy() error {
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ theConn.StopUnit(getUnitName(m.Cgroups), "replace", nil)
+ if err := cgroups.RemovePaths(m.Paths); err != nil {
+ return err
+ }
+ m.Paths = make(map[string]string)
+ return nil
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ m.mu.Lock()
+ paths := m.Paths
+ m.mu.Unlock()
+ return paths
+}
+
+func join(c *configs.Cgroup, subsystem string, pid int) (string, error) {
+ path, err := getSubsystemPath(c, subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return "", err
+ }
+ if err := cgroups.WriteCgroupProc(path, pid); err != nil {
+ return "", err
+ }
+ return path, nil
+}
+
+func joinCgroups(c *configs.Cgroup, pid int) error {
+ for _, sys := range subsystems {
+ name := sys.Name()
+ switch name {
+ case "name=systemd":
+ // let systemd handle this
+ case "cpuset":
+ path, err := getSubsystemPath(c, name)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ s := &fs.CpusetGroup{}
+ if err := s.ApplyDir(path, c, pid); err != nil {
+ return err
+ }
+ default:
+ _, err := join(c, name, pid)
+ if err != nil {
+ // Even if it's a `not found` error, we'll return err
+ // because the devices cgroup is a hard requirement for
+ // container security.
+ if name == "devices" {
+ return err
+ }
+ // For other subsystems, omit the `not found` error
+ // because they are optional.
+ if !cgroups.IsNotFound(err) {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// systemd represents slice hierarchy using `-`, so we need to follow suit when
+// generating the path of slice. Essentially, test-a-b.slice becomes
+// test.slice/test-a.slice/test-a-b.slice.
+func ExpandSlice(slice string) (string, error) {
+ suffix := ".slice"
+ // Name has to end with ".slice", but can't be just ".slice".
+ if len(slice) < len(suffix) || !strings.HasSuffix(slice, suffix) {
+ return "", fmt.Errorf("invalid slice name: %s", slice)
+ }
+
+ // Path-separators are not allowed.
+ if strings.Contains(slice, "/") {
+ return "", fmt.Errorf("invalid slice name: %s", slice)
+ }
+
+ var path, prefix string
+ sliceName := strings.TrimSuffix(slice, suffix)
+ // if input was -.slice, we should just return root now
+ if sliceName == "-" {
+ return "/", nil
+ }
+ for _, component := range strings.Split(sliceName, "-") {
+ // test--a.slice isn't permitted, nor is -test.slice.
+ if component == "" {
+ return "", fmt.Errorf("invalid slice name: %s", slice)
+ }
+
+ // Append the component to the path and to the prefix.
+ path += prefix + component + suffix + "/"
+ prefix += component + "-"
+ }
+
+ return path, nil
+}
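A minimal usage sketch of ExpandSlice (illustrative, assuming it runs inside this package with fmt imported); note that the expanded path keeps a trailing slash:

```go
path, _ := ExpandSlice("test-a-b.slice")
fmt.Println(path) // test.slice/test-a.slice/test-a-b.slice/

root, _ := ExpandSlice("-.slice")
fmt.Println(root) // /
```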
+
+func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) {
+ mountpoint, err := cgroups.FindCgroupMountpoint(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ initPath, err := cgroups.GetInitCgroup(subsystem)
+ if err != nil {
+ return "", err
+ }
+ // if pid 1 is systemd 226 or later, it will be in init.scope, not the root
+ initPath = strings.TrimSuffix(filepath.Clean(initPath), "init.scope")
+
+ slice := "system.slice"
+ if c.Parent != "" {
+ slice = c.Parent
+ }
+
+ slice, err = ExpandSlice(slice)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
+}
+
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ path, err := getSubsystemPath(m.Cgroups, "freezer")
+ if err != nil {
+ return err
+ }
+ prevState := m.Cgroups.Resources.Freezer
+ m.Cgroups.Resources.Freezer = state
+ freezer, err := subsystems.Get("freezer")
+ if err != nil {
+ return err
+ }
+ err = freezer.Set(path, m.Cgroups)
+ if err != nil {
+ m.Cgroups.Resources.Freezer = prevState
+ return err
+ }
+ return nil
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ path, err := getSubsystemPath(m.Cgroups, "devices")
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetPids(path)
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ path, err := getSubsystemPath(m.Cgroups, "devices")
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetAllPids(path)
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ stats := cgroups.NewStats()
+ for name, path := range m.Paths {
+ sys, err := subsystems.Get(name)
+ if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
+ continue
+ }
+ if err := sys.GetStats(path, stats); err != nil {
+ return nil, err
+ }
+ }
+
+ return stats, nil
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+ // If Paths are set, then we are just joining cgroups paths
+ // and there is no need to set any values.
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+ for _, sys := range subsystems {
+ // Get the subsystem path, but don't error out for not found cgroups.
+ path, err := getSubsystemPath(container.Cgroups, sys.Name())
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ if err := sys.Set(path, container.Cgroups); err != nil {
+ return err
+ }
+ }
+
+ if m.Paths["cpu"] != "" {
+ if err := fs.CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func getUnitName(c *configs.Cgroup) string {
+ // by default, we create a scope unless the user explicitly asks for a slice.
+ if !strings.HasSuffix(c.Name, ".slice") {
+ return fmt.Sprintf("%s-%s.scope", c.ScopePrefix, c.Name)
+ }
+ return c.Name
+}
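Illustrative inputs and outputs for getUnitName, with hypothetical Cgroup values (a sketch, assuming it runs inside this package):

```go
c := &configs.Cgroup{ScopePrefix: "libcontainer", Name: "abc123"}
fmt.Println(getUnitName(c)) // libcontainer-abc123.scope

c = &configs.Cgroup{Name: "machine-test.slice"}
fmt.Println(getUnitName(c)) // machine-test.slice, returned unchanged
```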
+
+func setKernelMemory(c *configs.Cgroup) error {
+ path, err := getSubsystemPath(c, "memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ return fs.EnableKernelMemoryAccounting(path)
+}
+
+// isUnitExists returns true if the error is that a systemd unit already exists.
+func isUnitExists(err error) bool {
+ if err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ return strings.Contains(dbusError.Name, "org.freedesktop.systemd1.UnitExists")
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
new file mode 100644
index 000000000..7c995efee
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -0,0 +1,462 @@
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/go-units"
+)
+
+const (
+ cgroupNamePrefix = "name="
+ CgroupProcesses = "cgroup.procs"
+)
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
+func FindCgroupMountpoint(subsystem string) (string, error) {
+ mnt, _, err := FindCgroupMountpointAndRoot(subsystem)
+ return mnt, err
+}
+
+func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) {
+ // We are not using mount.GetMounts() because it's super-inefficient;
+ // parsing /proc/self/mountinfo directly, without Sscanf, sped this up about 10x.
+ // It was one of the two major performance drawbacks at container start.
+ if !isSubsystemAvailable(subsystem) {
+ return "", "", NewNotFoundError(subsystem)
+ }
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Split(txt, " ")
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], fields[3], nil
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", "", err
+ }
+
+ return "", "", NewNotFoundError(subsystem)
+}
+
+func isSubsystemAvailable(subsystem string) bool {
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return false
+ }
+ _, avail := cgroups[subsystem]
+ return avail
+}
+
+func GetClosestMountpointAncestor(dir, mountinfo string) string {
+ deepestMountPoint := ""
+ for _, mountInfoEntry := range strings.Split(mountinfo, "\n") {
+ mountInfoParts := strings.Fields(mountInfoEntry)
+ if len(mountInfoParts) < 5 {
+ continue
+ }
+ mountPoint := mountInfoParts[4]
+ if strings.HasPrefix(mountPoint, deepestMountPoint) && strings.HasPrefix(dir, mountPoint) {
+ deepestMountPoint = mountPoint
+ }
+ }
+ return deepestMountPoint
+}
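A sketch of GetClosestMountpointAncestor over two hypothetical mountinfo lines; both mount points are ancestors of dir, and the deeper one wins:

```go
mi := "23 1 8:1 / / rw - ext4 /dev/sda1 rw\n" +
	"47 23 0:27 / /sys/fs/cgroup rw - tmpfs tmpfs rw"
fmt.Println(GetClosestMountpointAncestor("/sys/fs/cgroup/memory", mi))
// Output: /sys/fs/cgroup
```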
+
+func FindCgroupMountpointDir() (string, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ text := scanner.Text()
+ fields := strings.Split(text, " ")
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ numPostFields := len(postSeparatorFields)
+
+ // This is an error as we can't detect if the mount is for "cgroup"
+ if numPostFields == 0 {
+ return "", fmt.Errorf("Found no fields post '-' in %q", text)
+ }
+
+ if postSeparatorFields[0] == "cgroup" {
+ // Check that the mount is properly formatted.
+ if numPostFields < 3 {
+ return "", fmt.Errorf("Error: found fewer than 3 fields post '-' in %q", text)
+ }
+
+ return filepath.Dir(fields[4]), nil
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ return "", NewNotFoundError("cgroup")
+}
+
+type Mount struct {
+ Mountpoint string
+ Root string
+ Subsystems []string
+}
+
+func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) {
+ if len(m.Subsystems) == 0 {
+ return "", fmt.Errorf("no subsystem for mount")
+ }
+
+ return getControllerPath(m.Subsystems[0], cgroups)
+}
+
+func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) {
+ res := make([]Mount, 0, len(ss))
+ scanner := bufio.NewScanner(mi)
+ numFound := 0
+ for scanner.Scan() && numFound < len(ss) {
+ txt := scanner.Text()
+ sepIdx := strings.Index(txt, " - ")
+ if sepIdx == -1 {
+ return nil, fmt.Errorf("invalid mountinfo format")
+ }
+ if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" {
+ continue
+ }
+ fields := strings.Split(txt, " ")
+ m := Mount{
+ Mountpoint: fields[4],
+ Root: fields[3],
+ }
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if !ss[opt] {
+ continue
+ }
+ if strings.HasPrefix(opt, cgroupNamePrefix) {
+ m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):])
+ } else {
+ m.Subsystems = append(m.Subsystems, opt)
+ }
+ if !all {
+ numFound++
+ }
+ }
+ res = append(res, m)
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// GetCgroupMounts returns the mounts for the cgroup subsystems.
+// all indicates whether to return just the first instance or all the mounts.
+func GetCgroupMounts(all bool) ([]Mount, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ allSubsystems, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return nil, err
+ }
+
+ allMap := make(map[string]bool)
+ for s := range allSubsystems {
+ allMap[s] = true
+ }
+ return getCgroupMountsHelper(allMap, f, all)
+}
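As a sketch of what the parser above produces, here is getCgroupMountsHelper fed a single hypothetical cgroup line from mountinfo (assumes it runs inside this package):

```go
mi := strings.NewReader(
	"31 23 0:26 / /sys/fs/cgroup/memory rw,nosuid - cgroup cgroup rw,memory")
mounts, err := getCgroupMountsHelper(map[string]bool{"memory": true}, mi, false)
if err == nil && len(mounts) > 0 {
	fmt.Printf("%+v\n", mounts[0])
	// {Mountpoint:/sys/fs/cgroup/memory Root:/ Subsystems:[memory]}
}
```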
+
+// GetAllSubsystems returns all the cgroup subsystems supported by the kernel
+func GetAllSubsystems() ([]string, error) {
+ f, err := os.Open("/proc/cgroups")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ subsystems := []string{}
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ text := s.Text()
+ if text[0] != '#' {
+ parts := strings.Fields(text)
+ if len(parts) >= 4 && parts[3] != "0" {
+ subsystems = append(subsystems, parts[0])
+ }
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return subsystems, nil
+}
+
+// GetOwnCgroup returns the relative path to the cgroup the calling process is running in.
+func GetOwnCgroup(subsystem string) (string, error) {
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func GetOwnCgroupPath(subsystem string) (string, error) {
+ cgroup, err := GetOwnCgroup(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ return getCgroupPathHelper(subsystem, cgroup)
+}
+
+func GetInitCgroup(subsystem string) (string, error) {
+ cgroups, err := ParseCgroupFile("/proc/1/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func GetInitCgroupPath(subsystem string) (string, error) {
+ cgroup, err := GetInitCgroup(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ return getCgroupPathHelper(subsystem, cgroup)
+}
+
+func getCgroupPathHelper(subsystem, cgroup string) (string, error) {
+ mnt, root, err := FindCgroupMountpointAndRoot(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ // This is needed for nested containers, because in /proc/self/cgroup we
+ // see paths from the host, which don't exist in the container.
+ relCgroup, err := filepath.Rel(root, cgroup)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(mnt, relCgroup), nil
+}
+
+func readProcsFile(dir string) ([]int, error) {
+ f, err := os.Open(filepath.Join(dir, CgroupProcesses))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var (
+ s = bufio.NewScanner(f)
+ out = []int{}
+ )
+
+ for s.Scan() {
+ if t := s.Text(); t != "" {
+ pid, err := strconv.Atoi(t)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, pid)
+ }
+ }
+ return out, nil
+}
+
+// ParseCgroupFile parses the given cgroup file, typically from
+// /proc/<pid>/cgroup, into a map of subsystems to cgroup paths.
+func ParseCgroupFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseCgroupFromReader(f)
+}
+
+// helper function for ParseCgroupFile to make testing easier
+func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
+ s := bufio.NewScanner(r)
+ cgroups := make(map[string]string)
+
+ for s.Scan() {
+ text := s.Text()
+ // from cgroups(7):
+ // /proc/[pid]/cgroup
+ // ...
+ // For each cgroup hierarchy ... there is one entry
+ // containing three colon-separated fields of the form:
+ // hierarchy-ID:subsystem-list:cgroup-path
+ parts := strings.SplitN(text, ":", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
+ }
+
+ for _, subs := range strings.Split(parts[1], ",") {
+ cgroups[subs] = parts[2]
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return cgroups, nil
+}
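A sketch of the map parseCgroupFromReader builds from a hypothetical /proc/self/cgroup excerpt (assumes it runs inside this package); comma-separated subsystem lists share one path:

```go
r := strings.NewReader(
	"4:memory:/user.slice\n" +
		"3:cpu,cpuacct:/user.slice\n" +
		"1:name=systemd:/user.slice/session-1.scope\n")
cg, _ := parseCgroupFromReader(r)
// cg["memory"]       == "/user.slice"
// cg["cpu"]          == "/user.slice"
// cg["cpuacct"]      == "/user.slice"
// cg["name=systemd"] == "/user.slice/session-1.scope"
```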
+
+func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
+
+ if p, ok := cgroups[subsystem]; ok {
+ return p, nil
+ }
+
+ if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok {
+ return p, nil
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
+
+func PathExists(path string) bool {
+ if _, err := os.Stat(path); err != nil {
+ return false
+ }
+ return true
+}
+
+func EnterPid(cgroupPaths map[string]string, pid int) error {
+ for _, path := range cgroupPaths {
+ if PathExists(path) {
+ if err := WriteCgroupProc(path, pid); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// RemovePaths iterates over the provided paths, removing them.
+// We try to remove all paths five times, with an increasing delay between tries.
+// If some cgroups remain after all attempts, an appropriate error is
+// returned.
+func RemovePaths(paths map[string]string) (err error) {
+ delay := 10 * time.Millisecond
+ for i := 0; i < 5; i++ {
+ if i != 0 {
+ time.Sleep(delay)
+ delay *= 2
+ }
+ for s, p := range paths {
+ os.RemoveAll(p)
+ // TODO: here probably should be logging
+ _, err := os.Stat(p)
+ // We need this strange way of checking cgroup existence because
+ // RemoveAll almost always returns an error, even for already-removed
+ // cgroups.
+ if os.IsNotExist(err) {
+ delete(paths, s)
+ }
+ }
+ if len(paths) == 0 {
+ return nil
+ }
+ }
+ return fmt.Errorf("Failed to remove paths: %v", paths)
+}
+
+func GetHugePageSize() ([]string, error) {
+ var pageSizes []string
+ sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"}
+ files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
+ if err != nil {
+ return pageSizes, err
+ }
+ for _, st := range files {
+ nameArray := strings.Split(st.Name(), "-")
+ pageSize, err := units.RAMInBytes(nameArray[1])
+ if err != nil {
+ return []string{}, err
+ }
+ sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)
+ pageSizes = append(pageSizes, sizeString)
+ }
+
+ return pageSizes, nil
+}
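The directory-name conversion above, traced for a common hypothetical entry: /sys/kernel/mm/hugepages/hugepages-2048kB is split on "-", the size part is parsed as binary-prefixed bytes, and then re-rendered in the form the cgroup file names use:

```go
pageSize, _ := units.RAMInBytes("2048kB") // 2097152 bytes
sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"}
fmt.Println(units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList))
// Output: 2MB
```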
+
+// GetPids returns all pids that were added to the cgroup at the given path.
+func GetPids(path string) ([]int, error) {
+ return readProcsFile(path)
+}
+
+// GetAllPids returns all pids that were added to the cgroup at the given path
+// and to all of its subcgroups.
+func GetAllPids(path string) ([]int, error) {
+ var pids []int
+ // collect pids from all sub-cgroups
+ err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
+ dir, file := filepath.Split(p)
+ if file != CgroupProcesses {
+ return nil
+ }
+ if iErr != nil {
+ return iErr
+ }
+ cPids, err := readProcsFile(dir)
+ if err != nil {
+ return err
+ }
+ pids = append(pids, cPids...)
+ return nil
+ })
+ return pids, err
+}
+
+// WriteCgroupProc writes the specified pid into the cgroup's cgroup.procs file
+func WriteCgroupProc(dir string, pid int) error {
+ // Normally dir should not be empty; one case where it can be is when the
+ // cgroup subsystem is not mounted, and we want to fail here.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s", CgroupProcesses)
+ }
+
+ // Don't attach any pid to the cgroup if -1 is specified as the pid
+ if pid != -1 {
+ if err := ioutil.WriteFile(filepath.Join(dir, CgroupProcesses), []byte(strconv.Itoa(pid)), 0700); err != nil {
+ return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go
new file mode 100644
index 000000000..c7bdf1f60
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go
@@ -0,0 +1,10 @@
+// +build linux,!go1.5
+
+package libcontainer
+
+import "syscall"
+
+// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building
+// with earlier versions
+func enableSetgroups(sys *syscall.SysProcAttr) {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
new file mode 100644
index 000000000..e0f3ca165
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
@@ -0,0 +1,61 @@
+package configs
+
+import "fmt"
+
+// blockIODevice holds the major:minor format supported by the blkio cgroup
+type blockIODevice struct {
+ // Major is the device's major number
+ Major int64 `json:"major"`
+ // Minor is the device's minor number
+ Minor int64 `json:"minor"`
+}
+
+// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair
+type WeightDevice struct {
+ blockIODevice
+ // Weight is the bandwidth rate for the device, range is from 10 to 1000
+ Weight uint16 `json:"weight"`
+ // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ LeafWeight uint16 `json:"leafWeight"`
+}
+
+// NewWeightDevice returns a configured WeightDevice pointer
+func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice {
+ wd := &WeightDevice{}
+ wd.Major = major
+ wd.Minor = minor
+ wd.Weight = weight
+ wd.LeafWeight = leafWeight
+ return wd
+}
+
+// WeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) WeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)
+}
+
+// LeafWeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) LeafWeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)
+}
+
+// ThrottleDevice struct holds a `major:minor rate_per_second` pair
+type ThrottleDevice struct {
+ blockIODevice
+ // Rate is the IO rate limit per cgroup per device
+ Rate uint64 `json:"rate"`
+}
+
+// NewThrottleDevice returns a configured ThrottleDevice pointer
+func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice {
+ td := &ThrottleDevice{}
+ td.Major = major
+ td.Minor = minor
+ td.Rate = rate
+ return td
+}
+
+// String formats the struct to be writable to the cgroup specific file
+func (td *ThrottleDevice) String() string {
+ return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
new file mode 100644
index 000000000..e15a662f5
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
@@ -0,0 +1,122 @@
+package configs
+
+type FreezerState string
+
+const (
+ Undefined FreezerState = ""
+ Frozen FreezerState = "FROZEN"
+ Thawed FreezerState = "THAWED"
+)
+
+type Cgroup struct {
+ // Deprecated, use Path instead
+ Name string `json:"name,omitempty"`
+
+ // name of the parent cgroup or slice
+ // Deprecated, use Path instead
+ Parent string `json:"parent,omitempty"`
+
+ // Path specifies the path to cgroups that are created and/or joined by the container.
+ // The path is assumed to be relative to the host system cgroup mountpoint.
+ Path string `json:"path"`
+
+ // ScopePrefix describes prefix for the scope name
+ ScopePrefix string `json:"scope_prefix"`
+
+ // Paths represent the absolute cgroups paths to join.
+ // This takes precedence over Path.
+ Paths map[string]string
+
+ // Resources contains various cgroups settings to apply
+ *Resources
+}
+
+type Resources struct {
+ // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
+ // Deprecated
+ AllowAllDevices *bool `json:"allow_all_devices,omitempty"`
+ // Deprecated
+ AllowedDevices []*Device `json:"allowed_devices,omitempty"`
+ // Deprecated
+ DeniedDevices []*Device `json:"denied_devices,omitempty"`
+
+ Devices []*Device `json:"devices"`
+
+ // Memory limit (in bytes)
+ Memory int64 `json:"memory"`
+
+ // Memory reservation or soft_limit (in bytes)
+ MemoryReservation int64 `json:"memory_reservation"`
+
+ // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwap int64 `json:"memory_swap"`
+
+ // Kernel memory limit (in bytes)
+ KernelMemory int64 `json:"kernel_memory"`
+
+ // Kernel memory limit for TCP use (in bytes)
+ KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
+
+ // CPU shares (relative weight vs. other containers)
+ CpuShares uint64 `json:"cpu_shares"`
+
+ // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+ CpuQuota int64 `json:"cpu_quota"`
+
+ // CPU period to be used for hardcapping (in usecs). 0 to use system default.
+ CpuPeriod uint64 `json:"cpu_period"`
+
+ // How much CPU time can be used in realtime scheduling (in usecs).
+ CpuRtRuntime int64 `json:"cpu_rt_quota"`
+
+ // CPU period to be used for realtime scheduling (in usecs).
+ CpuRtPeriod uint64 `json:"cpu_rt_period"`
+
+ // CPU to use
+ CpusetCpus string `json:"cpuset_cpus"`
+
+ // MEM to use
+ CpusetMems string `json:"cpuset_mems"`
+
+ // Process limit; set <= 0 to disable the limit.
+ PidsLimit int64 `json:"pids_limit"`
+
+ // Specifies per cgroup weight, range is from 10 to 1000.
+ BlkioWeight uint16 `json:"blkio_weight"`
+
+ // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
+
+ // Weight per cgroup per device, can override BlkioWeight.
+ BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
+
+ // IO read rate limit per cgroup per device, bytes per second.
+ BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
+
+ // IO write rate limit per cgroup per device, bytes per second.
+ BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
+
+ // IO read rate limit per cgroup per device, IO per second.
+ BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
+
+ // IO write rate limit per cgroup per device, IO per second.
+ BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"`
+
+ // set the freeze value for the process
+ Freezer FreezerState `json:"freezer"`
+
+ // Hugetlb limit (in bytes)
+ HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"`
+
+ // Whether to disable OOM Killer
+ OomKillDisable bool `json:"oom_kill_disable"`
+
+ // Tuning swappiness behaviour per cgroup
+ MemorySwappiness *uint64 `json:"memory_swappiness"`
+
+ // Set priority of network traffic for container
+ NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
+
+ // Set class identifier for container's network packets
+ NetClsClassid uint32 `json:"net_cls_classid_u"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
new file mode 100644
index 000000000..95e2830a4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
@@ -0,0 +1,6 @@
+// +build !windows,!linux,!freebsd
+
+package configs
+
+type Cgroup struct {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
new file mode 100644
index 000000000..d74847b0d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
@@ -0,0 +1,6 @@
+package configs
+
+// TODO Windows: This can ultimately be entirely factored out on Windows as
+// cgroups are a Unix-specific construct.
+type Cgroup struct {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
new file mode 100644
index 000000000..269fffff3
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -0,0 +1,344 @@
+package configs
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+
+ "github.com/sirupsen/logrus"
+)
+
+type Rlimit struct {
+ Type int `json:"type"`
+ Hard uint64 `json:"hard"`
+ Soft uint64 `json:"soft"`
+}
+
+// IDMap represents UID/GID Mappings for User Namespaces.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+// Seccomp represents syscall restrictions
+// By default, only the native architecture of the kernel is allowed to be used
+// for syscalls. Additional architectures can be added by specifying them in
+// Architectures.
+type Seccomp struct {
+ DefaultAction Action `json:"default_action"`
+ Architectures []string `json:"architectures"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Action is taken upon rule match in Seccomp
+type Action int
+
+const (
+ Kill Action = iota + 1
+ Errno
+ Trap
+ Allow
+ Trace
+)
+
+// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
+type Operator int
+
+const (
+ EqualTo Operator = iota + 1
+ NotEqualTo
+ GreaterThan
+ GreaterThanOrEqualTo
+ LessThan
+ LessThanOrEqualTo
+ MaskEqualTo
+)
+
+// Arg is a rule to match a specific syscall argument in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"value_two"`
+ Op Operator `json:"op"`
+}
+
+// Syscall is a rule to match a syscall in Seccomp
+type Syscall struct {
+ Name string `json:"name"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+}
+
+// TODO Windows. Many of these fields should be factored out into those parts
+// which are common across platforms, and those which are platform specific.
+
+// Config defines configuration options for executing a process inside a contained environment.
+type Config struct {
+ // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
+ // This is a common option when the container is running in ramdisk
+ NoPivotRoot bool `json:"no_pivot_root"`
+
+ // ParentDeathSignal specifies the signal that is sent to the container's process in the case
+ // that the parent process dies.
+ ParentDeathSignal int `json:"parent_death_signal"`
+
+ // Path to a directory containing the container's root filesystem.
+ Rootfs string `json:"rootfs"`
+
+ // Readonlyfs will remount the container's rootfs as read-only, where only externally mounted
+ // bind mounts are writable.
+ Readonlyfs bool `json:"readonlyfs"`
+
+ // Specifies the mount propagation flags to be applied to /.
+ RootPropagation int `json:"rootPropagation"`
+
+ // Mounts specify additional source and destination paths that will be mounted inside the container's
+ // rootfs and mount namespace if specified
+ Mounts []*Mount `json:"mounts"`
+
+ // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
+ Devices []*Device `json:"devices"`
+
+ MountLabel string `json:"mount_label"`
+
+ // Hostname optionally sets the container's hostname if provided
+ Hostname string `json:"hostname"`
+
+ // Namespaces specifies the container's namespaces that it should setup when cloning the init process
+ // If a namespace is not provided that namespace is shared from the container's parent process
+ Namespaces Namespaces `json:"namespaces"`
+
+ // Capabilities specify the capabilities to keep when executing the process inside the container
+ // All capabilities not specified will be dropped from the process's capability mask
+ Capabilities *Capabilities `json:"capabilities"`
+
+ // Networks specifies the container's network setup to be created
+ Networks []*Network `json:"networks"`
+
+ // Routes can be specified to create entries in the route table as the container is started
+ Routes []*Route `json:"routes"`
+
+ // Cgroups specifies specific cgroup settings for the various subsystems that the container is
+ // placed into to limit the resources the container has available
+ Cgroups *Cgroup `json:"cgroups"`
+
+ // AppArmorProfile specifies the profile to apply to the process running in the container and is
+ // changed at the time the process is execed
+ AppArmorProfile string `json:"apparmor_profile,omitempty"`
+
+ // ProcessLabel specifies the label to apply to the process running in the container. It is
+ // commonly used by selinux
+ ProcessLabel string `json:"process_label,omitempty"`
+
+ // Rlimits specifies the resource limits, such as max open files, to set in the container
+ // If Rlimits are not set, the container will inherit rlimits from the parent process
+ Rlimits []Rlimit `json:"rlimits,omitempty"`
+
+ // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
+ // for a process. Valid values are in the range [-1000, 1000], where processes with
+ // higher scores are preferred for being killed.
+ // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
+ OomScoreAdj int `json:"oom_score_adj"`
+
+ // UidMappings is an array of User ID mappings for User Namespaces
+ UidMappings []IDMap `json:"uid_mappings"`
+
+ // GidMappings is an array of Group ID mappings for User Namespaces
+ GidMappings []IDMap `json:"gid_mappings"`
+
+ // MaskPaths specifies paths within the container's rootfs to mask over with a bind
+ // mount pointing to /dev/null so as to prevent reads of these files.
+ MaskPaths []string `json:"mask_paths"`
+
+ // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
+ // so that writes to these files are prevented.
+ ReadonlyPaths []string `json:"readonly_paths"`
+
+ // Sysctl is a map of properties and their values. It is the equivalent of using
+ // sysctl -w my.property.name value in Linux.
+ Sysctl map[string]string `json:"sysctl"`
+
+ // Seccomp allows actions to be taken whenever a syscall is made within the container.
+ // A number of rules are given, each having an action to be taken if a syscall matches it.
+ // A default action to be taken if no rules match is also given.
+ Seccomp *Seccomp `json:"seccomp"`
+
+ // NoNewPrivileges controls whether processes in the container can gain additional privileges.
+ NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
+
+ // Hooks are a collection of actions to perform at various container lifecycle events.
+ // CommandHooks are serialized to JSON, but other hooks are not.
+ Hooks *Hooks
+
+ // Version is the version of opencontainer specification that is supported.
+ Version string `json:"version"`
+
+ // Labels are user defined metadata that is stored in the config and populated on the state
+ Labels []string `json:"labels"`
+
+ // NoNewKeyring, if set, will prevent allocating a new session keyring for the container,
+ // using the caller's keyring in that case.
+ NoNewKeyring bool `json:"no_new_keyring"`
+
+ // Rootless specifies whether the container is a rootless container.
+ Rootless bool `json:"rootless"`
+}
+
+type Hooks struct {
+ // Prestart commands are executed after the container namespaces are created,
+ // but before the user supplied command is executed from init.
+ Prestart []Hook
+
+ // Poststart commands are executed after the container init process starts.
+ Poststart []Hook
+
+ // Poststop commands are executed after the container init process exits.
+ Poststop []Hook
+}
+
+type Capabilities struct {
+ // Bounding is the capability bounding set: the limiting superset of capabilities that can be gained.
+ Bounding []string
+ // Effective is the set of capabilities used by the kernel for permission checks.
+ Effective []string
+ // Inheritable is the capabilities preserved across execve.
+ Inheritable []string
+ // Permitted is the limiting superset for effective capabilities.
+ Permitted []string
+ // Ambient is the ambient set of capabilities that are kept.
+ Ambient []string
+}
+
+func (hooks *Hooks) UnmarshalJSON(b []byte) error {
+ var state struct {
+ Prestart []CommandHook
+ Poststart []CommandHook
+ Poststop []CommandHook
+ }
+
+ if err := json.Unmarshal(b, &state); err != nil {
+ return err
+ }
+
+ deserialize := func(shooks []CommandHook) (hooks []Hook) {
+ for _, shook := range shooks {
+ hooks = append(hooks, shook)
+ }
+
+ return hooks
+ }
+
+ hooks.Prestart = deserialize(state.Prestart)
+ hooks.Poststart = deserialize(state.Poststart)
+ hooks.Poststop = deserialize(state.Poststop)
+ return nil
+}
+
+func (hooks Hooks) MarshalJSON() ([]byte, error) {
+ serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
+ for _, hook := range hooks {
+ switch chook := hook.(type) {
+ case CommandHook:
+ serializableHooks = append(serializableHooks, chook)
+ default:
+ logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
+ }
+ }
+
+ return serializableHooks
+ }
+
+ return json.Marshal(map[string]interface{}{
+ "prestart": serialize(hooks.Prestart),
+ "poststart": serialize(hooks.Poststart),
+ "poststop": serialize(hooks.Poststop),
+ })
+}
+
+// HookState is the payload provided to a hook on execution.
+type HookState specs.State
+
+type Hook interface {
+ // Run executes the hook with the provided state.
+ Run(HookState) error
+}
+
+// NewFunctionHook will call the provided function when the hook is run.
+func NewFunctionHook(f func(HookState) error) FuncHook {
+ return FuncHook{
+ run: f,
+ }
+}
+
+type FuncHook struct {
+ run func(HookState) error
+}
+
+func (f FuncHook) Run(s HookState) error {
+ return f.run(s)
+}
+
+type Command struct {
+ Path string `json:"path"`
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Dir string `json:"dir"`
+ Timeout *time.Duration `json:"timeout"`
+}
+
+// NewCommandHook will execute the provided command when the hook is run.
+func NewCommandHook(cmd Command) CommandHook {
+ return CommandHook{
+ Command: cmd,
+ }
+}
+
+type CommandHook struct {
+ Command
+}
+
+func (c Command) Run(s HookState) error {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ var stdout, stderr bytes.Buffer
+ cmd := exec.Cmd{
+ Path: c.Path,
+ Args: c.Args,
+ Env: c.Env,
+ Stdin: bytes.NewReader(b),
+ Stdout: &stdout,
+ Stderr: &stderr,
+ }
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ errC := make(chan error, 1)
+ go func() {
+ err := cmd.Wait()
+ if err != nil {
+ err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String())
+ }
+ errC <- err
+ }()
+ var timerCh <-chan time.Time
+ if c.Timeout != nil {
+ timer := time.NewTimer(*c.Timeout)
+ defer timer.Stop()
+ timerCh = timer.C
+ }
+ select {
+ case err := <-errC:
+ return err
+ case <-timerCh:
+ cmd.Process.Kill()
+ cmd.Wait()
+ return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds())
+ }
+}
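An illustrative way to wire up a command hook with a timeout using the types above; the hook binary path is hypothetical. Run marshals the HookState to JSON and feeds it to the command's stdin, killing the command if it outlives the timeout:

```go
timeout := 5 * time.Second
hook := NewCommandHook(Command{
	Path:    "/usr/local/bin/netns-setup", // hypothetical hook binary
	Args:    []string{"netns-setup"},      // Args[0] is the command name
	Timeout: &timeout,
})
_ = hook // err := hook.Run(HookState{}) delivers the state on the hook's stdin
```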
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go
new file mode 100644
index 000000000..07da10804
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go
@@ -0,0 +1,61 @@
+package configs
+
+import "fmt"
+
+// HostUID gets the translated uid for the process on the host, which could be
+// different when user namespaces are enabled.
+func (c Config) HostUID(containerId int) (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.UidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no uid mappings found.")
+ }
+ id, found := c.hostIDFromMapping(containerId, c.UidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no user mapping found.")
+ }
+ return id, nil
+ }
+ // Return unchanged id.
+ return containerId, nil
+}
+
+// HostRootUID gets the root uid for the process on the host, which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostRootUID() (int, error) {
+ return c.HostUID(0)
+}
+
+// HostGID gets the translated gid for the process on the host, which could be
+// different when user namespaces are enabled.
+func (c Config) HostGID(containerId int) (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.GidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.")
+ }
+ id, found := c.hostIDFromMapping(containerId, c.GidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no group mapping found.")
+ }
+ return id, nil
+ }
+ // Return unchanged id.
+ return containerId, nil
+}
+
+// HostRootGID gets the root gid for the process on the host, which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostRootGID() (int, error) {
+ return c.HostGID(0)
+}
+
+// Utility function that gets a host ID for a container ID from user namespace map
+// if that ID is present in the map.
+func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) {
+ for _, m := range uMap {
+ if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (containerID - m.ContainerID)
+ return hostID, true
+ }
+ }
+ return -1, false
+}
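A worked example of the mapping arithmetic above, with a hypothetical user-namespace mapping:

```go
// Mapping: container IDs [0, 65536) -> host IDs [100000, 165536).
m := IDMap{ContainerID: 0, HostID: 100000, Size: 65536}
containerID := 1000
hostID := m.HostID + (containerID - m.ContainerID) // 101000
// A containerID outside [0, 65536) matches no mapping, yielding (-1, false).
_ = hostID
```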
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
new file mode 100644
index 000000000..8701bb212
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
@@ -0,0 +1,57 @@
+package configs
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ Wildcard = -1
+)
+
+// TODO Windows: This can be factored out in the future
+
+type Device struct {
+ // Device type, block, char, etc.
+ Type rune `json:"type"`
+
+ // Path to the device.
+ Path string `json:"path"`
+
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+
+ // Cgroup permissions format, rwm.
+ Permissions string `json:"permissions"`
+
+ // FileMode permission bits for the device.
+ FileMode os.FileMode `json:"file_mode"`
+
+ // Uid of the device.
+ Uid uint32 `json:"uid"`
+
+ // Gid of the device.
+ Gid uint32 `json:"gid"`
+
+ // Allow, when true, writes this device rule to the cgroup's allowed list.
+ Allow bool `json:"allow"`
+}
+
+func (d *Device) CgroupString() string {
+ return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
+}
+
+func (d *Device) Mkdev() int {
+ return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
+}
+
+// deviceNumberString converts the device number to its string representation, using "*" for the wildcard.
+func deviceNumberString(number int64) string {
+ if number == Wildcard {
+ return "*"
+ }
+ return fmt.Sprint(number)
+}
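A sketch of the device-rule strings these helpers produce (assumes it runs inside this package with fmt imported):

```go
d := &Device{Type: 'c', Major: 1, Minor: 3, Permissions: "rwm"}
fmt.Println(d.CgroupString()) // c 1:3 rwm

anyBlock := &Device{Type: 'b', Major: Wildcard, Minor: Wildcard, Permissions: "m"}
fmt.Println(anyBlock.CgroupString()) // b *:* m
```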
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
new file mode 100644
index 000000000..4d348d217
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
@@ -0,0 +1,111 @@
+// +build linux freebsd
+
+package configs
+
+var (
+ // DefaultSimpleDevices are devices that are to be both allowed and created.
+ DefaultSimpleDevices = []*Device{
+ // /dev/null and zero
+ {
+ Path: "/dev/null",
+ Type: 'c',
+ Major: 1,
+ Minor: 3,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/zero",
+ Type: 'c',
+ Major: 1,
+ Minor: 5,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ {
+ Path: "/dev/full",
+ Type: 'c',
+ Major: 1,
+ Minor: 7,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // consoles and ttys
+ {
+ Path: "/dev/tty",
+ Type: 'c',
+ Major: 5,
+ Minor: 0,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // /dev/urandom,/dev/random
+ {
+ Path: "/dev/urandom",
+ Type: 'c',
+ Major: 1,
+ Minor: 9,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/random",
+ Type: 'c',
+ Major: 1,
+ Minor: 8,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ }
+ DefaultAllowedDevices = append([]*Device{
+ // allow mknod for any device
+ {
+ Type: 'c',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+ {
+ Type: 'b',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+
+ {
+ Path: "/dev/console",
+ Type: 'c',
+ Major: 5,
+ Minor: 1,
+ Permissions: "rwm",
+ },
+ // /dev/pts/ - pts namespaces are "coming soon"
+ {
+ Path: "",
+ Type: 'c',
+ Major: 136,
+ Minor: Wildcard,
+ Permissions: "rwm",
+ },
+ {
+ Path: "",
+ Type: 'c',
+ Major: 5,
+ Minor: 2,
+ Permissions: "rwm",
+ },
+
+ // tuntap
+ {
+ Path: "",
+ Type: 'c',
+ Major: 10,
+ Minor: 200,
+ Permissions: "rwm",
+ },
+ }, DefaultSimpleDevices...)
+ DefaultAutoCreatedDevices = append([]*Device{}, DefaultSimpleDevices...)
+)
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
new file mode 100644
index 000000000..d30216380
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
@@ -0,0 +1,9 @@
+package configs
+
+type HugepageLimit struct {
+ // Pagesize is the hugepage size to limit, e.g. "2MB".
+ Pagesize string `json:"page_size"`
+
+ // Limit is the usage limit for hugepages of this size, in bytes.
+ Limit uint64 `json:"limit"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
new file mode 100644
index 000000000..9a0395eaf
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
@@ -0,0 +1,14 @@
+package configs
+
+import (
+ "fmt"
+)
+
+type IfPrioMap struct {
+ Interface string `json:"interface"`
+ Priority int64 `json:"priority"`
+}
+
+func (i *IfPrioMap) CgroupString() string {
+ return fmt.Sprintf("%s %d", i.Interface, i.Priority)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
new file mode 100644
index 000000000..670757ddb
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
@@ -0,0 +1,39 @@
+package configs
+
+const (
+ // EXT_COPYUP is a directive to copy up the contents of a directory when
+ // a tmpfs is mounted over it.
+ EXT_COPYUP = 1 << iota
+)
+
+type Mount struct {
+ // Source path for the mount.
+ Source string `json:"source"`
+
+ // Destination path for the mount inside the container.
+ Destination string `json:"destination"`
+
+ // Device the mount is for.
+ Device string `json:"device"`
+
+ // Mount flags.
+ Flags int `json:"flags"`
+
+ // Propagation Flags
+ PropagationFlags []int `json:"propagation_flags"`
+
+ // Mount data applied to the mount.
+ Data string `json:"data"`
+
+ // Relabel source if set, "z" indicates shared, "Z" indicates unshared.
+ Relabel string `json:"relabel"`
+
+ // Extensions are additional flags that are specific to runc.
+ Extensions int `json:"extensions"`
+
+ // Optional Command to be run before Source is mounted.
+ PremountCmds []Command `json:"premount_cmds"`
+
+ // Optional Command to be run after Source is mounted.
+ PostmountCmds []Command `json:"postmount_cmds"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
new file mode 100644
index 000000000..a3329a31a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
@@ -0,0 +1,5 @@
+package configs
+
+type NamespaceType string
+
+type Namespaces []Namespace
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go
new file mode 100644
index 000000000..5fc171a57
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go
@@ -0,0 +1,122 @@
+package configs
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+const (
+ NEWNET NamespaceType = "NEWNET"
+ NEWPID NamespaceType = "NEWPID"
+ NEWNS NamespaceType = "NEWNS"
+ NEWUTS NamespaceType = "NEWUTS"
+ NEWIPC NamespaceType = "NEWIPC"
+ NEWUSER NamespaceType = "NEWUSER"
+)
+
+var (
+ nsLock sync.Mutex
+ supportedNamespaces = make(map[NamespaceType]bool)
+)
+
+// NsName converts the namespace type to its filename
+func NsName(ns NamespaceType) string {
+ switch ns {
+ case NEWNET:
+ return "net"
+ case NEWNS:
+ return "mnt"
+ case NEWPID:
+ return "pid"
+ case NEWIPC:
+ return "ipc"
+ case NEWUSER:
+ return "user"
+ case NEWUTS:
+ return "uts"
+ }
+ return ""
+}
+
+// IsNamespaceSupported returns whether a namespace is available or
+// not
+func IsNamespaceSupported(ns NamespaceType) bool {
+ nsLock.Lock()
+ defer nsLock.Unlock()
+ supported, ok := supportedNamespaces[ns]
+ if ok {
+ return supported
+ }
+ nsFile := NsName(ns)
+ // if the namespace type is unknown, just return false
+ if nsFile == "" {
+ return false
+ }
+ _, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile))
+ // a namespace is supported if it exists and we have permissions to read it
+ supported = err == nil
+ supportedNamespaces[ns] = supported
+ return supported
+}
+
+func NamespaceTypes() []NamespaceType {
+ return []NamespaceType{
+ NEWUSER, // Keep user NS always first, don't move it.
+ NEWIPC,
+ NEWUTS,
+ NEWNET,
+ NEWPID,
+ NEWNS,
+ }
+}
+
+// Namespace defines configuration for each namespace. It specifies an
+// alternate path that is able to be joined via setns.
+type Namespace struct {
+ Type NamespaceType `json:"type"`
+ Path string `json:"path"`
+}
+
+func (n *Namespace) GetPath(pid int) string {
+ return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type))
+}
+
+func (n *Namespaces) Remove(t NamespaceType) bool {
+ i := n.index(t)
+ if i == -1 {
+ return false
+ }
+ *n = append((*n)[:i], (*n)[i+1:]...)
+ return true
+}
+
+func (n *Namespaces) Add(t NamespaceType, path string) {
+ i := n.index(t)
+ if i == -1 {
+ *n = append(*n, Namespace{Type: t, Path: path})
+ return
+ }
+ (*n)[i].Path = path
+}
+
+func (n *Namespaces) index(t NamespaceType) int {
+ for i, ns := range *n {
+ if ns.Type == t {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n *Namespaces) Contains(t NamespaceType) bool {
+ return n.index(t) != -1
+}
+
+func (n *Namespaces) PathOf(t NamespaceType) string {
+ i := n.index(t)
+ if i == -1 {
+ return ""
+ }
+ return (*n)[i].Path
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
new file mode 100644
index 000000000..4ce6813d2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package configs
+
+import "golang.org/x/sys/unix"
+
+func (n *Namespace) Syscall() int {
+ return namespaceInfo[n.Type]
+}
+
+var namespaceInfo = map[NamespaceType]int{
+ NEWNET: unix.CLONE_NEWNET,
+ NEWNS: unix.CLONE_NEWNS,
+ NEWUSER: unix.CLONE_NEWUSER,
+ NEWIPC: unix.CLONE_NEWIPC,
+ NEWUTS: unix.CLONE_NEWUTS,
+ NEWPID: unix.CLONE_NEWPID,
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone or unshare. It returns flags only for namespaces to be created.
+func (n *Namespaces) CloneFlags() uintptr {
+ var flag int
+ for _, v := range *n {
+ if v.Path != "" {
+ continue
+ }
+ flag |= namespaceInfo[v.Type]
+ }
+ return uintptr(flag)
+}
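A minimal, Linux-only sketch (not part of the diff) of feeding CloneFlags into a clone; the command and the pid 1234 path are placeholders:

package main

import (
	"os/exec"
	"syscall"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	ns := configs.Namespaces{
		{Type: configs.NEWUTS},
		{Type: configs.NEWPID},
		// A configured path means "join via setns", so no clone flag is emitted.
		{Type: configs.NEWNET, Path: "/proc/1234/ns/net"},
	}
	cmd := exec.Command("/bin/sleep", "1000")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		// CLONE_NEWUTS | CLONE_NEWPID only; the net namespace is skipped.
		Cloneflags: ns.CloneFlags(),
	}
	_ = cmd // Starting this requires root or a user namespace.
}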
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
new file mode 100644
index 000000000..5d9a5c81f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!windows
+
+package configs
+
+func (n *Namespace) Syscall() int {
+ panic("No namespace syscall support")
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone or unshare. It returns flags only for namespaces to be created.
+func (n *Namespaces) CloneFlags() uintptr {
+ panic("No namespace syscall support")
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
new file mode 100644
index 000000000..19bf713de
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux
+
+package configs
+
+// Namespace defines the configuration for each namespace. It specifies an
+// alternate path that can be joined via setns.
+type Namespace struct {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
new file mode 100644
index 000000000..ccdb228e1
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
@@ -0,0 +1,72 @@
+package configs
+
+// Network defines configuration for a container's networking stack
+//
+// The network configuration can be omitted from a container, causing the
+// container to be set up with the host's networking stack.
+type Network struct {
+ // Type sets the network's type, commonly veth or loopback
+ Type string `json:"type"`
+
+ // Name of the network interface
+ Name string `json:"name"`
+
+ // The bridge to use.
+ Bridge string `json:"bridge"`
+
+ // MacAddress contains the MAC address to set on the network interface
+ MacAddress string `json:"mac_address"`
+
+ // Address contains the IPv4 and mask to set on the network interface
+ Address string `json:"address"`
+
+ // Gateway sets the gateway address that is used as the default for the interface
+ Gateway string `json:"gateway"`
+
+ // IPv6Address contains the IPv6 and mask to set on the network interface
+ IPv6Address string `json:"ipv6_address"`
+
+ // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
+ IPv6Gateway string `json:"ipv6_gateway"`
+
+ // Mtu sets the mtu value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ Mtu int `json:"mtu"`
+
+ // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ TxQueueLen int `json:"txqueuelen"`
+
+ // HostInterfaceName is the unique name of the host-side interface of the
+ // container's veth pair.
+ HostInterfaceName string `json:"host_interface_name"`
+
+ // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ // bridge port in the case of type veth
+ // Note: This is unsupported on some systems.
+ // Note: This does not apply to loopback interfaces.
+ HairpinMode bool `json:"hairpin_mode"`
+}
+
+// Routes can be specified to create entries in the route table as the container is started
+//
+// All of destination, source, and gateway should be either IPv4 or IPv6.
+// One of the three options must be present, and omitted entries will use their
+// IP family default for the route table. For IPv4, for example, setting the
+// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
+// destination of 0.0.0.0 (or *) when viewed in the route table.
+type Route struct {
+ // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Destination string `json:"destination"`
+
+ // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Source string `json:"source"`
+
+ // Sets the gateway. Accepts IPv4 and IPv6
+ Gateway string `json:"gateway"`
+
+ // The device to set this route up for, for example: eth0
+ InterfaceName string `json:"interface_name"`
+}
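For illustration, a hypothetical veth configuration with a default IPv4 route; every value below is a placeholder rather than anything this diff prescribes:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	veth := &configs.Network{
		Type:              "veth",
		Name:              "eth0",
		Bridge:            "docker0",
		Address:           "172.17.0.2/16",
		Gateway:           "172.17.0.1",
		Mtu:               1500,
		HostInterfaceName: "veth1234",
	}
	defaultRoute := &configs.Route{
		Destination:   "0.0.0.0/0",
		Gateway:       "172.17.0.1",
		InterfaceName: "eth0",
	}
	fmt.Printf("%+v\n%+v\n", veth, defaultRoute)
}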
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go
new file mode 100644
index 000000000..0cebfaf80
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go
@@ -0,0 +1,117 @@
+package validate
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var (
+ geteuid = os.Geteuid
+ getegid = os.Getegid
+)
+
+func (v *ConfigValidator) rootless(config *configs.Config) error {
+ if err := rootlessMappings(config); err != nil {
+ return err
+ }
+ if err := rootlessMount(config); err != nil {
+ return err
+ }
+ // Currently, cgroups cannot effectively be used in rootless containers.
+ // The new cgroup namespace doesn't really help us either because it doesn't
+ // have nice interactions with the user namespace (we're working with upstream
+ // to fix this).
+ if err := rootlessCgroup(config); err != nil {
+ return err
+ }
+
+ // XXX: We currently can't verify the user config at all, because
+ // configs.Config doesn't store the user-related configs. So this
+ // has to be verified by setupUser() in init_linux.go.
+
+ return nil
+}
+
+func rootlessMappings(config *configs.Config) error {
+ rootuid, err := config.HostRootUID()
+ if err != nil {
+ return fmt.Errorf("failed to get root uid from uidMappings: %v", err)
+ }
+ if euid := geteuid(); euid != 0 {
+ if !config.Namespaces.Contains(configs.NEWUSER) {
+ return fmt.Errorf("rootless containers require user namespaces")
+ }
+ if rootuid != euid {
+ return fmt.Errorf("rootless containers cannot map container root to a different host user")
+ }
+ }
+
+ rootgid, err := config.HostRootGID()
+ if err != nil {
+ return fmt.Errorf("failed to get root gid from gidMappings: %v", err)
+ }
+
+ // Similar to the above test, we need to make sure that we aren't trying to
+ // map to a group ID that we don't have the right to be.
+ if rootgid != getegid() {
+ return fmt.Errorf("rootless containers cannot map container root to a different host group")
+ }
+
+ // We can only map one user and group inside a container (our own).
+ if len(config.UidMappings) != 1 || config.UidMappings[0].Size != 1 {
+ return fmt.Errorf("rootless containers cannot map more than one user")
+ }
+ if len(config.GidMappings) != 1 || config.GidMappings[0].Size != 1 {
+ return fmt.Errorf("rootless containers cannot map more than one group")
+ }
+
+ return nil
+}
+
+// rootlessCgroup verifies that the user isn't trying to set any cgroup limits or paths.
+func rootlessCgroup(config *configs.Config) error {
+ // Nothing set at all.
+ if config.Cgroups == nil || config.Cgroups.Resources == nil {
+ return nil
+ }
+
+ // Used for comparing to the zero value.
+ left := reflect.ValueOf(*config.Cgroups.Resources)
+ right := reflect.Zero(left.Type())
+
+ // This is all we need to do, since specconv won't add cgroup options in
+ // rootless mode.
+ if !reflect.DeepEqual(left.Interface(), right.Interface()) {
+ return fmt.Errorf("cannot specify resource limits in rootless container")
+ }
+
+ return nil
+}
+
+// rootlessMount verifies that the user isn't trying to set up any mounts they
+// don't have the rights to. In addition, it makes sure that no mount has a
+// `uid=` or `gid=` option that doesn't resolve to root.
+func rootlessMount(config *configs.Config) error {
+ // XXX: We could whitelist allowed devices at this point, but I'm not
+ // convinced that's a good idea. The kernel is the best arbiter of
+ // access control.
+
+ for _, mount := range config.Mounts {
+ // Check that the options list doesn't contain any uid= or gid= entries
+ // that don't resolve to root.
+ for _, opt := range strings.Split(mount.Data, ",") {
+ if strings.HasPrefix(opt, "uid=") && opt != "uid=0" {
+ return fmt.Errorf("cannot specify uid= mount options in rootless containers where argument isn't 0")
+ }
+ if strings.HasPrefix(opt, "gid=") && opt != "gid=0" {
+ return fmt.Errorf("cannot specify gid= mount options in rootless containers where argument isn't 0")
+ }
+ }
+ }
+
+ return nil
+}
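The uid=/gid= scan in rootlessMount reduces to a small string check; here is the same logic in isolation (checkRootlessMountData is a made-up name for this sketch):

package main

import (
	"fmt"
	"strings"
)

// checkRootlessMountData is a hypothetical standalone version of the
// option scan performed by rootlessMount above.
func checkRootlessMountData(data string) error {
	for _, opt := range strings.Split(data, ",") {
		if strings.HasPrefix(opt, "uid=") && opt != "uid=0" {
			return fmt.Errorf("non-root option %q not allowed in rootless containers", opt)
		}
		if strings.HasPrefix(opt, "gid=") && opt != "gid=0" {
			return fmt.Errorf("non-root option %q not allowed in rootless containers", opt)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkRootlessMountData("rw,uid=0,gid=0")) // <nil>
	fmt.Println(checkRootlessMountData("rw,uid=1000"))    // error
}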
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go
new file mode 100644
index 000000000..828434544
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go
@@ -0,0 +1,195 @@
+package validate
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ selinux "github.com/opencontainers/selinux/go-selinux"
+)
+
+type Validator interface {
+ Validate(*configs.Config) error
+}
+
+func New() Validator {
+ return &ConfigValidator{}
+}
+
+type ConfigValidator struct {
+}
+
+func (v *ConfigValidator) Validate(config *configs.Config) error {
+ if err := v.rootfs(config); err != nil {
+ return err
+ }
+ if err := v.network(config); err != nil {
+ return err
+ }
+ if err := v.hostname(config); err != nil {
+ return err
+ }
+ if err := v.security(config); err != nil {
+ return err
+ }
+ if err := v.usernamespace(config); err != nil {
+ return err
+ }
+ if err := v.sysctl(config); err != nil {
+ return err
+ }
+ if config.Rootless {
+ if err := v.rootless(config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// rootfs validates that the container's rootfs is an absolute path and is
+// not a symlink.
+func (v *ConfigValidator) rootfs(config *configs.Config) error {
+ if _, err := os.Stat(config.Rootfs); err != nil {
+ if os.IsNotExist(err) {
+ return fmt.Errorf("rootfs (%s) does not exist", config.Rootfs)
+ }
+ return err
+ }
+ cleaned, err := filepath.Abs(config.Rootfs)
+ if err != nil {
+ return err
+ }
+ if cleaned, err = filepath.EvalSymlinks(cleaned); err != nil {
+ return err
+ }
+ if filepath.Clean(config.Rootfs) != cleaned {
+ return fmt.Errorf("%s is not an absolute path or is a symlink", config.Rootfs)
+ }
+ return nil
+}
+
+func (v *ConfigValidator) network(config *configs.Config) error {
+ if !config.Namespaces.Contains(configs.NEWNET) {
+ if len(config.Networks) > 0 || len(config.Routes) > 0 {
+ return fmt.Errorf("unable to apply network settings without a private NET namespace")
+ }
+ }
+ return nil
+}
+
+func (v *ConfigValidator) hostname(config *configs.Config) error {
+ if config.Hostname != "" && !config.Namespaces.Contains(configs.NEWUTS) {
+ return fmt.Errorf("unable to set hostname without a private UTS namespace")
+ }
+ return nil
+}
+
+func (v *ConfigValidator) security(config *configs.Config) error {
+ // restrict sys without mount namespace
+ if (len(config.MaskPaths) > 0 || len(config.ReadonlyPaths) > 0) &&
+ !config.Namespaces.Contains(configs.NEWNS) {
+ return fmt.Errorf("unable to restrict sys entries without a private MNT namespace")
+ }
+ if config.ProcessLabel != "" && !selinux.GetEnabled() {
+ return fmt.Errorf("selinux label is specified in config, but selinux is disabled or not supported")
+ }
+
+ return nil
+}
+
+func (v *ConfigValidator) usernamespace(config *configs.Config) error {
+ if config.Namespaces.Contains(configs.NEWUSER) {
+ if _, err := os.Stat("/proc/self/ns/user"); os.IsNotExist(err) {
+ return fmt.Errorf("USER namespaces aren't enabled in the kernel")
+ }
+ } else {
+ if config.UidMappings != nil || config.GidMappings != nil {
+ return fmt.Errorf("User namespace mappings specified, but USER namespace isn't enabled in the config")
+ }
+ }
+ return nil
+}
+
+// sysctl validates the specified sysctl keys.
+// /proc/sys isn't completely namespaced and depending on which namespaces
+// are specified, a subset of sysctls are permitted.
+func (v *ConfigValidator) sysctl(config *configs.Config) error {
+ validSysctlMap := map[string]bool{
+ "kernel.msgmax": true,
+ "kernel.msgmnb": true,
+ "kernel.msgmni": true,
+ "kernel.sem": true,
+ "kernel.shmall": true,
+ "kernel.shmmax": true,
+ "kernel.shmmni": true,
+ "kernel.shm_rmid_forced": true,
+ }
+
+ for s := range config.Sysctl {
+ if validSysctlMap[s] || strings.HasPrefix(s, "fs.mqueue.") {
+ if config.Namespaces.Contains(configs.NEWIPC) {
+ continue
+ } else {
+ return fmt.Errorf("sysctl %q is not allowed in the hosts ipc namespace", s)
+ }
+ }
+ if strings.HasPrefix(s, "net.") {
+ if config.Namespaces.Contains(configs.NEWNET) {
+ if path := config.Namespaces.PathOf(configs.NEWNET); path != "" {
+ if err := checkHostNs(s, path); err != nil {
+ return err
+ }
+ }
+ continue
+ } else {
+ return fmt.Errorf("sysctl %q is not allowed in the hosts network namespace", s)
+ }
+ }
+ return fmt.Errorf("sysctl %q is not in a separate kernel namespace", s)
+ }
+
+ return nil
+}
+
+func isSymbolicLink(path string) (bool, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return false, err
+ }
+
+ return fi.Mode()&os.ModeSymlink == os.ModeSymlink, nil
+}
+
+// checkHostNs checks whether a network sysctl is being applied in the host's network namespace.
+func checkHostNs(sysctlConfig string, path string) error {
+ var currentProcessNetns = "/proc/self/ns/net"
+ // readlink on the current process's network namespace
+ destOfCurrentProcess, err := os.Readlink(currentProcessNetns)
+ if err != nil {
+ return fmt.Errorf("read soft link %q error", currentProcessNetns)
+ }
+
+ // First check if the provided path is a symbolic link
+ symLink, err := isSymbolicLink(path)
+ if err != nil {
+ return fmt.Errorf("could not check that %q is a symlink: %v", path, err)
+ }
+
+ if !symLink {
+ // The provided namespace is not a symbolic link,
+ // so it cannot be the host namespace.
+ return nil
+ }
+
+ // readlink on the path provided in the struct
+ destOfContainer, err := os.Readlink(path)
+ if err != nil {
+ return fmt.Errorf("read soft link %q error", path)
+ }
+ if destOfContainer == destOfCurrentProcess {
+ return fmt.Errorf("sysctl %q is not allowed in the hosts network namespace", sysctlConfig)
+ }
+ return nil
+}
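A minimal sketch (not part of the diff) of wiring the validator, assuming the rootfs path exists on the host; all field values below are placeholders:

package main

import (
	"log"

	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runc/libcontainer/configs/validate"
)

func main() {
	cfg := &configs.Config{
		Rootfs:   "/var/lib/containers/demo/rootfs", // must exist and not be a symlink
		Hostname: "demo",
		Namespaces: configs.Namespaces{
			{Type: configs.NEWUTS}, // required because Hostname is set
			{Type: configs.NEWNS},
		},
	}
	if err := validate.New().Validate(cfg); err != nil {
		log.Fatalf("invalid config: %v", err)
	}
}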
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console.go b/vendor/github.com/opencontainers/runc/libcontainer/console.go
new file mode 100644
index 000000000..917acc702
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/console.go
@@ -0,0 +1,17 @@
+package libcontainer
+
+import (
+ "io"
+ "os"
+)
+
+// Console represents a pseudo TTY.
+type Console interface {
+ io.ReadWriteCloser
+
+ // Path returns the filesystem path to the slave side of the pty.
+ Path() string
+
+ // File returns the master file of the pty.
+ File() *os.File
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go
new file mode 100644
index 000000000..b7166a31f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go
@@ -0,0 +1,13 @@
+// +build freebsd
+
+package libcontainer
+
+import (
+ "errors"
+)
+
+// newConsole returns an initialized console that can be used within a container by copying bytes
+// from the master side to the slave that is attached as the tty for the container's init process.
+func newConsole() (Console, error) {
+ return nil, errors.New("libcontainer console is not supported on FreeBSD")
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go
new file mode 100644
index 000000000..f70de3848
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go
@@ -0,0 +1,152 @@
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func ConsoleFromFile(f *os.File) Console {
+ return &linuxConsole{
+ master: f,
+ }
+}
+
+// newConsole returns an initialized console that can be used within a container by copying bytes
+// from the master side to the slave that is attached as the tty for the container's init process.
+func newConsole() (Console, error) {
+ master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+ console, err := ptsname(master)
+ if err != nil {
+ return nil, err
+ }
+ if err := unlockpt(master); err != nil {
+ return nil, err
+ }
+ return &linuxConsole{
+ slavePath: console,
+ master: master,
+ }, nil
+}
+
+// linuxConsole is a linux pseudo TTY for use within a container.
+type linuxConsole struct {
+ master *os.File
+ slavePath string
+}
+
+func (c *linuxConsole) File() *os.File {
+ return c.master
+}
+
+func (c *linuxConsole) Path() string {
+ return c.slavePath
+}
+
+func (c *linuxConsole) Read(b []byte) (int, error) {
+ return c.master.Read(b)
+}
+
+func (c *linuxConsole) Write(b []byte) (int, error) {
+ return c.master.Write(b)
+}
+
+func (c *linuxConsole) Close() error {
+ if m := c.master; m != nil {
+ return m.Close()
+ }
+ return nil
+}
+
+// mount initializes the console inside the rootfs, mounting it with the
+// specified mount label and applying the correct ownership of the console.
+func (c *linuxConsole) mount() error {
+ oldMask := unix.Umask(0000)
+ defer unix.Umask(oldMask)
+ f, err := os.Create("/dev/console")
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ if f != nil {
+ f.Close()
+ }
+ return unix.Mount(c.slavePath, "/dev/console", "bind", unix.MS_BIND, "")
+}
+
+// dupStdio opens the slavePath for the console and dups the fds to the
+// current process's stdio, fds 0, 1, and 2.
+func (c *linuxConsole) dupStdio() error {
+ slave, err := c.open(unix.O_RDWR)
+ if err != nil {
+ return err
+ }
+ fd := int(slave.Fd())
+ for _, i := range []int{0, 1, 2} {
+ if err := unix.Dup3(fd, i, 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// open is a clone of os.OpenFile without O_CLOEXEC, used to open the pty slave.
+func (c *linuxConsole) open(flag int) (*os.File, error) {
+ r, e := unix.Open(c.slavePath, flag, 0)
+ if e != nil {
+ return nil, &os.PathError{
+ Op: "open",
+ Path: c.slavePath,
+ Err: e,
+ }
+ }
+ return os.NewFile(uintptr(r), c.slavePath), nil
+}
+
+func ioctl(fd uintptr, flag, data uintptr) error {
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
+ return err
+ }
+ return nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+ var u int32
+ return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+ n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("/dev/pts/%d", n), nil
+}
+
+// SaneTerminal sets the necessary tty_ioctl(4)s to ensure that a pty pair
+// created by us acts normally. In particular, a little-known default of
+// Linux unix98 ptys is that they have +onlcr enabled. While this isn't a
+// problem for terminal emulators, because we relay data from the terminal we
+// would also relay that funky line discipline.
+func SaneTerminal(terminal *os.File) error {
+ termios, err := unix.IoctlGetTermios(int(terminal.Fd()), unix.TCGETS)
+ if err != nil {
+ return fmt.Errorf("ioctl(tty, tcgets): %s", err.Error())
+ }
+
+ // Set -onlcr so we don't have to deal with \r.
+ termios.Oflag &^= unix.ONLCR
+
+ if err := unix.IoctlSetTermios(int(terminal.Fd()), unix.TCSETS, termios); err != nil {
+ return fmt.Errorf("ioctl(tty, tcsets): %s", err.Error())
+ }
+
+ return nil
+}
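A hedged sketch (not part of the diff) combining the exported pieces above: open a pty master the same way newConsole does, wrap it with ConsoleFromFile, and strip +onlcr with SaneTerminal:

package main

import (
	"log"
	"os"

	"github.com/opencontainers/runc/libcontainer"
	"golang.org/x/sys/unix"
)

func main() {
	// Open a pty master as newConsole does; slave-path handling is omitted.
	master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err)
	}
	console := libcontainer.ConsoleFromFile(master)
	defer console.Close()

	// Strip +onlcr so relayed output doesn't gain carriage returns.
	if err := libcontainer.SaneTerminal(console.File()); err != nil {
		log.Fatal(err)
	}
}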
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go
new file mode 100644
index 000000000..e5ca54599
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go
@@ -0,0 +1,11 @@
+package libcontainer
+
+import (
+ "errors"
+)
+
+// newConsole returns an initialized console that can be used within a container by copying bytes
+// from the master side to the slave that is attached as the tty for the container's init process.
+func newConsole() (Console, error) {
+ return nil, errors.New("libcontainer console is not supported on Solaris")
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go
new file mode 100644
index 000000000..c61e866a5
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go
@@ -0,0 +1,30 @@
+package libcontainer
+
+// newConsole returns an initialized console that can be used within a container
+func newConsole() (Console, error) {
+ return &windowsConsole{}, nil
+}
+
+// windowsConsole is a Windows pseudo TTY for use within a container.
+type windowsConsole struct {
+}
+
+func (c *windowsConsole) Fd() uintptr {
+ return 0
+}
+
+func (c *windowsConsole) Path() string {
+ return ""
+}
+
+func (c *windowsConsole) Read(b []byte) (int, error) {
+ return 0, nil
+}
+
+func (c *windowsConsole) Write(b []byte) (int, error) {
+ return 0, nil
+}
+
+func (c *windowsConsole) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container.go b/vendor/github.com/opencontainers/runc/libcontainer/container.go
new file mode 100644
index 000000000..2e31b4d4f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/container.go
@@ -0,0 +1,166 @@
+// Package libcontainer provides a native Go implementation for creating containers
+// with namespaces, cgroups, capabilities, and filesystem access controls.
+// It allows you to manage the lifecycle of the container, performing additional operations
+// after the container is created.
+package libcontainer
+
+import (
+ "os"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+// Status is the status of a container.
+type Status int
+
+const (
+ // Created is the status that denotes the container exists but has not been run yet.
+ Created Status = iota
+ // Running is the status that denotes the container exists and is running.
+ Running
+ // Pausing is the status that denotes the container exists and is in the process of being paused.
+ Pausing
+ // Paused is the status that denotes the container exists, but all its processes are paused.
+ Paused
+ // Stopped is the status that denotes the container does not have a created or running process.
+ Stopped
+)
+
+func (s Status) String() string {
+ switch s {
+ case Created:
+ return "created"
+ case Running:
+ return "running"
+ case Pausing:
+ return "pausing"
+ case Paused:
+ return "paused"
+ case Stopped:
+ return "stopped"
+ default:
+ return "unknown"
+ }
+}
+
+// BaseState represents the platform-agnostic pieces relating to a
+// running container's state
+type BaseState struct {
+ // ID is the container ID.
+ ID string `json:"id"`
+
+ // InitProcessPid is the init process id in the parent namespace.
+ InitProcessPid int `json:"init_process_pid"`
+
+ // InitProcessStartTime is the init process start time in clock cycles since boot time.
+ InitProcessStartTime uint64 `json:"init_process_start"`
+
+ // Created is the UTC creation time of the container
+ Created time.Time `json:"created"`
+
+ // Config is the container's configuration.
+ Config configs.Config `json:"config"`
+}
+
+// BaseContainer is a libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found. BaseContainer includes methods that are platform agnostic.
+type BaseContainer interface {
+ // Returns the ID of the container
+ ID() string
+
+ // Returns the current status of the container.
+ //
+ // errors:
+ // ContainerNotExists - Container no longer exists,
+ // Systemerror - System error.
+ Status() (Status, error)
+
+ // State returns the current container's state information.
+ //
+ // errors:
+ // SystemError - System error.
+ State() (*State, error)
+
+ // Returns the current config of the container.
+ Config() configs.Config
+
+ // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process.
+ //
+ // errors:
+ // ContainerNotExists - Container no longer exists,
+ // Systemerror - System error.
+ //
+ // Some of the returned PIDs may no longer refer to processes in the Container, unless
+ // the Container state is PAUSED in which case every PID in the slice is valid.
+ Processes() ([]int, error)
+
+ // Returns statistics for the container.
+ //
+ // errors:
+ // ContainerNotExists - Container no longer exists,
+ // Systemerror - System error.
+ Stats() (*Stats, error)
+
+ // Set resources of container as configured
+ //
+ // We can use this to change resources when containers are running.
+ //
+ // errors:
+ // SystemError - System error.
+ Set(config configs.Config) error
+
+ // Start a process inside the container. Returns error if process fails to
+ // start. You can track process lifecycle with passed Process structure.
+ //
+ // errors:
+ // ContainerNotExists - Container no longer exists,
+ // ConfigInvalid - config is invalid,
+ // ContainerPaused - Container is paused,
+ // SystemError - System error.
+ Start(process *Process) (err error)
+
+ // Run immediately starts the process inside the container. Returns an error if the
+ // process fails to start. It does not block waiting for the exec fifo; the fifo is
+ // opened after start returns.
+ //
+ // errors:
+ // ContainerNotExists - Container no longer exists,
+ // ConfigInvalid - config is invalid,
+ // ContainerPaused - Container is paused,
+ // SystemError - System error.
+ Run(process *Process) (err error)
+
+ // Destroys the container, if it's in a valid state, after killing any
+ // remaining running processes.
+ //
+ // Any event registrations are removed before the container is destroyed.
+ // No error is returned if the container is already destroyed.
+ //
+ // Running containers must first be stopped using Signal(..).
+ // Paused containers must first be resumed using Resume(..).
+ //
+ // errors:
+ // ContainerNotStopped - Container is still running,
+ // ContainerPaused - Container is paused,
+ // SystemError - System error.
+ Destroy() error
+
+ // Signal sends the provided signal code to the container's initial process.
+ //
+ // If all is specified the signal is sent to all processes in the container
+ // including the initial process.
+ //
+ // errors:
+ // SystemError - System error.
+ Signal(s os.Signal, all bool) error
+
+ // Exec signals the container to exec the users process at the end of the init.
+ //
+ // errors:
+ // SystemError - System error.
+ Exec() error
+}
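A sketch (not part of the diff) against the BaseContainer interface; stopIfRunning is a hypothetical helper, and obtaining the container (e.g. via a factory's Load) is outside this file:

package containerutil

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer"
)

// stopIfRunning queries a container's status and kills its init process
// if it is still running.
func stopIfRunning(c libcontainer.BaseContainer) error {
	status, err := c.Status()
	if err != nil {
		return err
	}
	fmt.Printf("container %s is %s\n", c.ID(), status)
	if status == libcontainer.Running {
		// false: signal only the init process; true would signal all processes.
		return c.Signal(os.Kill, false)
	}
	return nil
}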
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go
new file mode 100644
index 000000000..d7e7516e5
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go
@@ -0,0 +1,1654 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+ "syscall" // only for SysProcAttr and Signal
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/criurpc"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/runc/libcontainer/utils"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/sirupsen/logrus"
+ "github.com/syndtr/gocapability/capability"
+ "github.com/vishvananda/netlink/nl"
+ "golang.org/x/sys/unix"
+)
+
+const stdioFdCount = 3
+
+type linuxContainer struct {
+ id string
+ root string
+ config *configs.Config
+ cgroupManager cgroups.Manager
+ initArgs []string
+ initProcess parentProcess
+ initProcessStartTime uint64
+ criuPath string
+ m sync.Mutex
+ criuVersion int
+ state containerState
+ created time.Time
+}
+
+// State represents a running container's state
+type State struct {
+ BaseState
+
+ // Platform specific fields below here
+
+ // Specifies if the container was started under the rootless mode.
+ Rootless bool `json:"rootless"`
+
+ // Paths to all the cgroups set up for a container. Key is the cgroup subsystem name
+ // with the value as the path.
+ CgroupPaths map[string]string `json:"cgroup_paths"`
+
+ // NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
+ // with the value as the path.
+ NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`
+
+ // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore
+ ExternalDescriptors []string `json:"external_descriptors,omitempty"`
+}
+
+// Container is a libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found.
+type Container interface {
+ BaseContainer
+
+ // Methods below here are platform specific
+
+ // Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
+ //
+ // errors:
+ // Systemerror - System error.
+ Checkpoint(criuOpts *CriuOpts) error
+
+ // Restore restores the checkpointed container to a running state using the criu(8) utility.
+ //
+ // errors:
+ // Systemerror - System error.
+ Restore(process *Process, criuOpts *CriuOpts) error
+
+ // If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses
+ // the execution of any user processes. Asynchronously, when the container finished being paused the
+ // state is changed to PAUSED.
+ // If the Container state is PAUSED, do nothing.
+ //
+ // errors:
+ // ContainerNotExists - Container no longer exists,
+ // ContainerNotRunning - Container not running or created,
+ // Systemerror - System error.
+ Pause() error
+
+ // If the Container state is PAUSED, resumes the execution of any user processes in the
+ // Container before setting the Container state to RUNNING.
+ // If the Container state is RUNNING, do nothing.
+ //
+ // errors:
+ // ContainerNotExists - Container no longer exists,
+ // ContainerNotPaused - Container is not paused,
+ // Systemerror - System error.
+ Resume() error
+
+ // NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
+ //
+ // errors:
+ // Systemerror - System error.
+ NotifyOOM() (<-chan struct{}, error)
+
+ // NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level
+ //
+ // errors:
+ // Systemerror - System error.
+ NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
+}
+
+// ID returns the container's unique ID
+func (c *linuxContainer) ID() string {
+ return c.id
+}
+
+// Config returns the container's configuration
+func (c *linuxContainer) Config() configs.Config {
+ return *c.config
+}
+
+func (c *linuxContainer) Status() (Status, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+ return c.currentStatus()
+}
+
+func (c *linuxContainer) State() (*State, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+ return c.currentState()
+}
+
+func (c *linuxContainer) Processes() ([]int, error) {
+ pids, err := c.cgroupManager.GetAllPids()
+ if err != nil {
+ return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups")
+ }
+ return pids, nil
+}
+
+func (c *linuxContainer) Stats() (*Stats, error) {
+ var (
+ err error
+ stats = &Stats{}
+ )
+ if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil {
+ return stats, newSystemErrorWithCause(err, "getting container stats from cgroups")
+ }
+ for _, iface := range c.config.Networks {
+ switch iface.Type {
+ case "veth":
+ istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
+ if err != nil {
+ return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName)
+ }
+ stats.Interfaces = append(stats.Interfaces, istats)
+ }
+ }
+ return stats, nil
+}
+
+func (c *linuxContainer) Set(config configs.Config) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ status, err := c.currentStatus()
+ if err != nil {
+ return err
+ }
+ if status == Stopped {
+ return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning)
+ }
+ c.config = &config
+ return c.cgroupManager.Set(c.config)
+}
+
+func (c *linuxContainer) Start(process *Process) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ status, err := c.currentStatus()
+ if err != nil {
+ return err
+ }
+ if status == Stopped {
+ if err := c.createExecFifo(); err != nil {
+ return err
+ }
+ }
+ if err := c.start(process, status == Stopped); err != nil {
+ if status == Stopped {
+ c.deleteExecFifo()
+ }
+ return err
+ }
+ return nil
+}
+
+func (c *linuxContainer) Run(process *Process) error {
+ c.m.Lock()
+ status, err := c.currentStatus()
+ if err != nil {
+ c.m.Unlock()
+ return err
+ }
+ c.m.Unlock()
+ if err := c.Start(process); err != nil {
+ return err
+ }
+ if status == Stopped {
+ return c.exec()
+ }
+ return nil
+}
+
+func (c *linuxContainer) Exec() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ return c.exec()
+}
+
+func (c *linuxContainer) exec() error {
+ path := filepath.Join(c.root, execFifoFilename)
+ f, err := os.OpenFile(path, os.O_RDONLY, 0)
+ if err != nil {
+ return newSystemErrorWithCause(err, "open exec fifo for reading")
+ }
+ defer f.Close()
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ if len(data) > 0 {
+ os.Remove(path)
+ return nil
+ }
+ return fmt.Errorf("cannot start an already running container")
+}
+
+func (c *linuxContainer) start(process *Process, isInit bool) error {
+ parent, err := c.newParentProcess(process, isInit)
+ if err != nil {
+ return newSystemErrorWithCause(err, "creating new parent process")
+ }
+ if err := parent.start(); err != nil {
+ // terminate the process to ensure that it is properly reaped.
+ if err := parent.terminate(); err != nil {
+ logrus.Warn(err)
+ }
+ return newSystemErrorWithCause(err, "starting container process")
+ }
+ // generate a timestamp indicating when the container was started
+ c.created = time.Now().UTC()
+ if isInit {
+ c.state = &createdState{
+ c: c,
+ }
+ state, err := c.updateState(parent)
+ if err != nil {
+ return err
+ }
+ c.initProcessStartTime = state.InitProcessStartTime
+
+ if c.config.Hooks != nil {
+ s := configs.HookState{
+ Version: c.config.Version,
+ ID: c.id,
+ Pid: parent.pid(),
+ Bundle: utils.SearchLabels(c.config.Labels, "bundle"),
+ }
+ for i, hook := range c.config.Hooks.Poststart {
+ if err := hook.Run(s); err != nil {
+ if err := parent.terminate(); err != nil {
+ logrus.Warn(err)
+ }
+ return newSystemErrorWithCausef(err, "running poststart hook %d", i)
+ }
+ }
+ }
+ } else {
+ c.state = &runningState{
+ c: c,
+ }
+ }
+ return nil
+}
+
+func (c *linuxContainer) Signal(s os.Signal, all bool) error {
+ if all {
+ return signalAllProcesses(c.cgroupManager, s)
+ }
+ if err := c.initProcess.signal(s); err != nil {
+ return newSystemErrorWithCause(err, "signaling init process")
+ }
+ return nil
+}
+
+func (c *linuxContainer) createExecFifo() error {
+ rootuid, err := c.Config().HostRootUID()
+ if err != nil {
+ return err
+ }
+ rootgid, err := c.Config().HostRootGID()
+ if err != nil {
+ return err
+ }
+
+ fifoName := filepath.Join(c.root, execFifoFilename)
+ if _, err := os.Stat(fifoName); err == nil {
+ return fmt.Errorf("exec fifo %s already exists", fifoName)
+ }
+ oldMask := unix.Umask(0000)
+ if err := unix.Mkfifo(fifoName, 0622); err != nil {
+ unix.Umask(oldMask)
+ return err
+ }
+ unix.Umask(oldMask)
+ if err := os.Chown(fifoName, rootuid, rootgid); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *linuxContainer) deleteExecFifo() {
+ fifoName := filepath.Join(c.root, execFifoFilename)
+ os.Remove(fifoName)
+}
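// The exec fifo handshake above reduces to: create a fifo at <root>/exec.fifo,
// have the container's init process signal readiness by writing to it, and have
// exec() unblock by reading it. A standalone sketch under that assumption; the
// path is made up and a goroutine plays init's role:
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fifo := "/tmp/demo-exec.fifo" // stand-in for filepath.Join(c.root, execFifoFilename)
	if err := unix.Mkfifo(fifo, 0622); err != nil {
		panic(err)
	}
	defer os.Remove(fifo)

	// In runc, init writes to the fifo when it is ready to exec the user process.
	go func() {
		w, err := os.OpenFile(fifo, os.O_WRONLY, 0)
		if err != nil {
			return
		}
		w.Write([]byte("0"))
		w.Close()
	}()

	// Like exec() above, this read blocks until init signals readiness.
	r, err := os.OpenFile(fifo, os.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	data, _ := ioutil.ReadAll(r)
	r.Close()
	fmt.Printf("init signalled readiness with %d byte(s)\n", len(data))
}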
+
+func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) {
+ parentPipe, childPipe, err := utils.NewSockPair("init")
+ if err != nil {
+ return nil, newSystemErrorWithCause(err, "creating new init pipe")
+ }
+ cmd, err := c.commandTemplate(p, childPipe)
+ if err != nil {
+ return nil, newSystemErrorWithCause(err, "creating new command template")
+ }
+ if !doInit {
+ return c.newSetnsProcess(p, cmd, parentPipe, childPipe)
+ }
+
+ // We only set up rootDir if we're not doing a `runc exec`. The reason for
+ // this is to avoid cases where a racing, unprivileged process inside the
+ // container can get access to the statedir file descriptor (which would
+ // allow for container rootfs escape).
+ rootDir, err := os.Open(c.root)
+ if err != nil {
+ return nil, err
+ }
+ cmd.ExtraFiles = append(cmd.ExtraFiles, rootDir)
+ cmd.Env = append(cmd.Env,
+ fmt.Sprintf("_LIBCONTAINER_STATEDIR=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
+ return c.newInitProcess(p, cmd, parentPipe, childPipe, rootDir)
+}
+
+func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) {
+ cmd := exec.Command(c.initArgs[0], c.initArgs[1:]...)
+ cmd.Stdin = p.Stdin
+ cmd.Stdout = p.Stdout
+ cmd.Stderr = p.Stderr
+ cmd.Dir = c.config.Rootfs
+ if cmd.SysProcAttr == nil {
+ cmd.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...)
+ if p.ConsoleSocket != nil {
+ cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket)
+ cmd.Env = append(cmd.Env,
+ fmt.Sprintf("_LIBCONTAINER_CONSOLE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
+ )
+ }
+ cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe)
+ cmd.Env = append(cmd.Env,
+ fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
+ )
+ // NOTE: when running a container with no PID namespace and the parent process spawning the container is
+ // PID 1, the pdeathsig is delivered to the container's init process by the kernel for some reason,
+ // even with the parent still running.
+ if c.config.ParentDeathSignal > 0 {
+ cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal)
+ }
+ return cmd, nil
+}
+
+func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe, rootDir *os.File) (*initProcess, error) {
+ cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard))
+ nsMaps := make(map[configs.NamespaceType]string)
+ for _, ns := range c.config.Namespaces {
+ if ns.Path != "" {
+ nsMaps[ns.Type] = ns.Path
+ }
+ }
+ _, sharePidns := nsMaps[configs.NEWPID]
+ data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps)
+ if err != nil {
+ return nil, err
+ }
+ return &initProcess{
+ cmd: cmd,
+ childPipe: childPipe,
+ parentPipe: parentPipe,
+ manager: c.cgroupManager,
+ config: c.newInitConfig(p),
+ container: c,
+ process: p,
+ bootstrapData: data,
+ sharePidns: sharePidns,
+ rootDir: rootDir,
+ }, nil
+}
+
+func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) {
+ cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
+ state, err := c.currentState()
+ if err != nil {
+ return nil, newSystemErrorWithCause(err, "getting container's current state")
+ }
+ // for a setns process we don't have to set cloneflags, as the process's
+ // namespaces will only be set via the setns syscall
+ data, err := c.bootstrapData(0, state.NamespacePaths)
+ if err != nil {
+ return nil, err
+ }
+ return &setnsProcess{
+ cmd: cmd,
+ cgroupPaths: c.cgroupManager.GetPaths(),
+ childPipe: childPipe,
+ parentPipe: parentPipe,
+ config: c.newInitConfig(p),
+ process: p,
+ bootstrapData: data,
+ }, nil
+}
+
+func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
+ cfg := &initConfig{
+ Config: c.config,
+ Args: process.Args,
+ Env: process.Env,
+ User: process.User,
+ AdditionalGroups: process.AdditionalGroups,
+ Cwd: process.Cwd,
+ Capabilities: process.Capabilities,
+ PassedFilesCount: len(process.ExtraFiles),
+ ContainerId: c.ID(),
+ NoNewPrivileges: c.config.NoNewPrivileges,
+ Rootless: c.config.Rootless,
+ AppArmorProfile: c.config.AppArmorProfile,
+ ProcessLabel: c.config.ProcessLabel,
+ Rlimits: c.config.Rlimits,
+ }
+ if process.NoNewPrivileges != nil {
+ cfg.NoNewPrivileges = *process.NoNewPrivileges
+ }
+ if process.AppArmorProfile != "" {
+ cfg.AppArmorProfile = process.AppArmorProfile
+ }
+ if process.Label != "" {
+ cfg.ProcessLabel = process.Label
+ }
+ if len(process.Rlimits) > 0 {
+ cfg.Rlimits = process.Rlimits
+ }
+ cfg.CreateConsole = process.ConsoleSocket != nil
+ return cfg
+}
+
+func (c *linuxContainer) Destroy() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ return c.state.destroy()
+}
+
+func (c *linuxContainer) Pause() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ status, err := c.currentStatus()
+ if err != nil {
+ return err
+ }
+ switch status {
+ case Running, Created:
+ if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
+ return err
+ }
+ return c.state.transition(&pausedState{
+ c: c,
+ })
+ }
+ return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning)
+}
+
+func (c *linuxContainer) Resume() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ status, err := c.currentStatus()
+ if err != nil {
+ return err
+ }
+ if status != Paused {
+ return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused)
+ }
+ if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
+ return err
+ }
+ return c.state.transition(&runningState{
+ c: c,
+ })
+}
+
+func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
+ // XXX(cyphar): This requires cgroups.
+ if c.config.Rootless {
+ return nil, fmt.Errorf("cannot get OOM notifications from rootless container")
+ }
+ return notifyOnOOM(c.cgroupManager.GetPaths())
+}
+
+func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
+ // XXX(cyphar): This requires cgroups.
+ if c.config.Rootless {
+ return nil, fmt.Errorf("cannot get memory pressure notifications from rootless container")
+ }
+ return notifyMemoryPressure(c.cgroupManager.GetPaths(), level)
+}
+
+var criuFeatures *criurpc.CriuFeatures
+
+func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error {
+
+ var t criurpc.CriuReqType
+ t = criurpc.CriuReqType_FEATURE_CHECK
+
+ // criu 1.8 => 10800
+ if err := c.checkCriuVersion(10800); err != nil {
+ // Feature checking was introduced with CRIU 1.8.
+ // Ignore the feature check if an older CRIU version is used
+ // and just act as before.
+ // As all automated PR testing is done using CRIU 1.7 this
+ // code will not be tested by automated PR testing.
+ return nil
+ }
+
+ // make sure the features we are looking for are really not from
+ // some previous check
+ criuFeatures = nil
+
+ req := &criurpc.CriuReq{
+ Type: &t,
+ // Theoretically this should not be necessary but CRIU
+ // segfaults if Opts is empty.
+ // Fixed in CRIU 2.12
+ Opts: rpcOpts,
+ Features: criuFeat,
+ }
+
+ err := c.criuSwrk(nil, req, criuOpts, false)
+ if err != nil {
+ logrus.Debugf("%s", err)
+ return fmt.Errorf("CRIU feature check failed")
+ }
+
+ logrus.Debugf("Feature check says: %s", criuFeatures)
+ missingFeatures := false
+
+ if *criuFeat.MemTrack && !*criuFeatures.MemTrack {
+ missingFeatures = true
+ logrus.Debugf("CRIU does not support MemTrack")
+ }
+
+ if missingFeatures {
+ return fmt.Errorf("CRIU is missing features")
+ }
+
+ return nil
+}
+
+func parseCriuVersion(path string) (int, error) {
+ var x, y, z int
+
+ out, err := exec.Command(path, "-V").Output()
+ if err != nil {
+ return 0, fmt.Errorf("Unable to execute CRIU command: %s", path)
+ }
+
+ x = 0
+ y = 0
+ z = 0
+ if ep := strings.Index(string(out), "-"); ep >= 0 {
+ // criu Git version format
+ var version string
+ if sp := strings.Index(string(out), "GitID"); sp > 0 {
+ version = string(out)[sp:ep]
+ } else {
+ return 0, fmt.Errorf("Unable to parse the CRIU version: %s", path)
+ }
+
+ n, err := fmt.Sscanf(string(version), "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2
+ if err != nil {
+ n, err = fmt.Sscanf(string(version), "GitID: v%d.%d", &x, &y) // 1.6
+ y++
+ } else {
+ z++
+ }
+ if n < 2 || err != nil {
+ return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err)
+ }
+ } else {
+ // criu release version format
+ n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2
+ if err != nil {
+ n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6
+ }
+ if n < 2 || err != nil {
+ return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err)
+ }
+ }
+
+ return x*10000 + y*100 + z, nil
+}
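// The integer encoding used throughout these version checks is
// x*10000 + y*100 + z. A tiny illustration; encodeCriuVersion is our
// name for this sketch, not runc's:
package main

import "fmt"

func encodeCriuVersion(x, y, z int) int {
	return x*10000 + y*100 + z
}

func main() {
	fmt.Println(encodeCriuVersion(1, 5, 2)) // 10502, the minimum for Checkpoint/Restore
	fmt.Println(encodeCriuVersion(3, 0, 0)) // 30000, first version with the VERSION RPC
}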
+
+func compareCriuVersion(criuVersion int, minVersion int) error {
+ // simple function to perform the actual version compare
+ if criuVersion < minVersion {
+ return fmt.Errorf("CRIU version %d must be %d or higher", criuVersion, minVersion)
+ }
+
+ return nil
+}
+
+// This is used to store the result of criu version RPC
+var criuVersionRPC *criurpc.CriuVersion
+
+// checkCriuVersion checks that the CRIU version is greater than or equal to minVersion
+func (c *linuxContainer) checkCriuVersion(minVersion int) error {
+
+ // If the version of criu has already been determined there is no need
+ // to ask criu for the version again. Use the value from c.criuVersion.
+ if c.criuVersion != 0 {
+ return compareCriuVersion(c.criuVersion, minVersion)
+ }
+
+ // First, check whether this version of CRIU supports the version RPC.
+ // The CRIU version RPC was introduced with CRIU 3.0.
+
+ // First, reset the variable for the RPC answer to nil
+ criuVersionRPC = nil
+
+ var t criurpc.CriuReqType
+ t = criurpc.CriuReqType_VERSION
+ req := &criurpc.CriuReq{
+ Type: &t,
+ }
+
+ err := c.criuSwrk(nil, req, nil, false)
+ if err != nil {
+ return fmt.Errorf("CRIU version check failed: %s", err)
+ }
+
+ if criuVersionRPC != nil {
+ logrus.Debugf("CRIU version: %s", criuVersionRPC)
+ // major and minor are always set
+ c.criuVersion = int(*criuVersionRPC.Major) * 10000
+ c.criuVersion += int(*criuVersionRPC.Minor) * 100
+ if criuVersionRPC.Sublevel != nil {
+ c.criuVersion += int(*criuVersionRPC.Sublevel)
+ }
+ if criuVersionRPC.Gitid != nil {
+ // runc's convention is that a CRIU git release is
+ // always the same as increasing the minor by 1
+ c.criuVersion -= (c.criuVersion % 100)
+ c.criuVersion += 100
+ }
+ return compareCriuVersion(c.criuVersion, minVersion)
+ }
+
+ // This is CRIU without the version RPC and therefore
+ // older than 3.0. Parsing the output is required.
+
+ // This can be removed once runc no longer works with criu older than 3.0
+
+ c.criuVersion, err = parseCriuVersion(c.criuPath)
+ if err != nil {
+ return err
+ }
+
+ return compareCriuVersion(c.criuVersion, minVersion)
+}
+
+const descriptorsFilename = "descriptors.json"
+
+func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) {
+ mountDest := m.Destination
+ if strings.HasPrefix(mountDest, c.config.Rootfs) {
+ mountDest = mountDest[len(c.config.Rootfs):]
+ }
+
+ extMnt := &criurpc.ExtMountMap{
+ Key: proto.String(mountDest),
+ Val: proto.String(mountDest),
+ }
+ req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
+}
+
+func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error {
+ for _, path := range c.config.MaskPaths {
+ fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path))
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return err
+ }
+ if fi.IsDir() {
+ continue
+ }
+
+ extMnt := &criurpc.ExtMountMap{
+ Key: proto.String(path),
+ Val: proto.String("/dev/null"),
+ }
+ req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
+ }
+
+ return nil
+}
+
+func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ // TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has
+ // support for doing unprivileged dumps, but the setup of
+ // rootless containers might make this complicated.
+ if c.config.Rootless {
+ return fmt.Errorf("cannot checkpoint a rootless container")
+ }
+
+ // criu 1.5.2 => 10502
+ if err := c.checkCriuVersion(10502); err != nil {
+ return err
+ }
+
+ if criuOpts.ImagesDirectory == "" {
+ return fmt.Errorf("invalid directory to save checkpoint")
+ }
+
+ // Since a container can be C/R'ed multiple times,
+ // the checkpoint directory may already exist.
+ if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ if criuOpts.WorkDirectory == "" {
+ criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
+ }
+
+ if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ workDir, err := os.Open(criuOpts.WorkDirectory)
+ if err != nil {
+ return err
+ }
+ defer workDir.Close()
+
+ imageDir, err := os.Open(criuOpts.ImagesDirectory)
+ if err != nil {
+ return err
+ }
+ defer imageDir.Close()
+
+ rpcOpts := criurpc.CriuOpts{
+ ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
+ WorkDirFd: proto.Int32(int32(workDir.Fd())),
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("dump.log"),
+ Root: proto.String(c.config.Rootfs),
+ ManageCgroups: proto.Bool(true),
+ NotifyScripts: proto.Bool(true),
+ Pid: proto.Int32(int32(c.initProcess.pid())),
+ ShellJob: proto.Bool(criuOpts.ShellJob),
+ LeaveRunning: proto.Bool(criuOpts.LeaveRunning),
+ TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
+ ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
+ FileLocks: proto.Bool(criuOpts.FileLocks),
+ EmptyNs: proto.Uint32(criuOpts.EmptyNs),
+ OrphanPtsMaster: proto.Bool(true),
+ }
+
+ fcg := c.cgroupManager.GetPaths()["freezer"]
+ if fcg != "" {
+ rpcOpts.FreezeCgroup = proto.String(fcg)
+ }
+
+ // append optional criu opts, e.g., page-server and port
+ if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 {
+ rpcOpts.Ps = &criurpc.CriuPageServerInfo{
+ Address: proto.String(criuOpts.PageServer.Address),
+ Port: proto.Int32(criuOpts.PageServer.Port),
+ }
+ }
+
+ // pre-dump may need the ParentImage option to complete an iterative migration
+ if criuOpts.ParentImage != "" {
+ rpcOpts.ParentImg = proto.String(criuOpts.ParentImage)
+ rpcOpts.TrackMem = proto.Bool(true)
+ }
+
+ // append optional manage cgroups mode
+ if criuOpts.ManageCgroupsMode != 0 {
+ // criu 1.7 => 10700
+ if err := c.checkCriuVersion(10700); err != nil {
+ return err
+ }
+ mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
+ rpcOpts.ManageCgroupsMode = &mode
+ }
+
+ var t criurpc.CriuReqType
+ if criuOpts.PreDump {
+ feat := criurpc.CriuFeatures{
+ MemTrack: proto.Bool(true),
+ }
+
+ if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
+ return err
+ }
+
+ t = criurpc.CriuReqType_PRE_DUMP
+ } else {
+ t = criurpc.CriuReqType_DUMP
+ }
+ req := &criurpc.CriuReq{
+ Type: &t,
+ Opts: &rpcOpts,
+ }
+
+ // no need to dump this information during a pre-dump
+ if !criuOpts.PreDump {
+ for _, m := range c.config.Mounts {
+ switch m.Device {
+ case "bind":
+ c.addCriuDumpMount(req, m)
+ case "cgroup":
+ binds, err := getCgroupMounts(m)
+ if err != nil {
+ return err
+ }
+ for _, b := range binds {
+ c.addCriuDumpMount(req, b)
+ }
+ }
+ }
+
+ if err := c.addMaskPaths(req); err != nil {
+ return err
+ }
+
+ for _, node := range c.config.Devices {
+ m := &configs.Mount{Destination: node.Path, Source: node.Path}
+ c.addCriuDumpMount(req, m)
+ }
+
+ // Write the FD info to a file in the image directory
+ fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors())
+ if err != nil {
+ return err
+ }
+
+ err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = c.criuSwrk(nil, req, criuOpts, false)
+ if err != nil {
+ return err
+ }
+ return nil
+}
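// A sketch of driving Checkpoint from the outside, using only CriuOpts fields
// exercised above (ImagesDirectory, LeaveRunning). How the Container is
// obtained (e.g. from a factory) is out of scope here, and
// checkpointLeaveRunning is a hypothetical helper name:
package checkpointutil

import "github.com/opencontainers/runc/libcontainer"

// checkpointLeaveRunning dumps a container's state without stopping it.
func checkpointLeaveRunning(c libcontainer.Container, imagesDir string) error {
	opts := &libcontainer.CriuOpts{
		ImagesDirectory: imagesDir, // must be non-empty or Checkpoint errors out
		LeaveRunning:    true,      // dump state but keep the container running
	}
	return c.Checkpoint(opts)
}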
+
+func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) {
+ mountDest := m.Destination
+ if strings.HasPrefix(mountDest, c.config.Rootfs) {
+ mountDest = mountDest[len(c.config.Rootfs):]
+ }
+
+ extMnt := &criurpc.ExtMountMap{
+ Key: proto.String(mountDest),
+ Val: proto.String(m.Source),
+ }
+ req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
+}
+
+func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) {
+ for _, iface := range c.config.Networks {
+ switch iface.Type {
+ case "veth":
+ veth := new(criurpc.CriuVethPair)
+ veth.IfOut = proto.String(iface.HostInterfaceName)
+ veth.IfIn = proto.String(iface.Name)
+ req.Opts.Veths = append(req.Opts.Veths, veth)
+ case "loopback":
+ // Do nothing
+ }
+ }
+ for _, i := range criuOpts.VethPairs {
+ veth := new(criurpc.CriuVethPair)
+ veth.IfOut = proto.String(i.HostInterfaceName)
+ veth.IfIn = proto.String(i.ContainerInterfaceName)
+ req.Opts.Veths = append(req.Opts.Veths, veth)
+ }
+}
+
+func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ // TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have
+ // support for unprivileged restore at the moment.
+ if c.config.Rootless {
+ return fmt.Errorf("cannot restore a rootless container")
+ }
+
+ // criu 1.5.2 => 10502
+ if err := c.checkCriuVersion(10502); err != nil {
+ return err
+ }
+ if criuOpts.WorkDirectory == "" {
+ criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
+ }
+ // Since a container can be C/R'ed multiple times,
+ // the work directory may already exist.
+ if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) {
+ return err
+ }
+ workDir, err := os.Open(criuOpts.WorkDirectory)
+ if err != nil {
+ return err
+ }
+ defer workDir.Close()
+ if criuOpts.ImagesDirectory == "" {
+ return fmt.Errorf("invalid directory to restore checkpoint")
+ }
+ imageDir, err := os.Open(criuOpts.ImagesDirectory)
+ if err != nil {
+ return err
+ }
+ defer imageDir.Close()
+ // CRIU has a few requirements for a root directory:
+ // * it must be a mount point
+ // * its parent must not be overmounted
+ // c.config.Rootfs is bind-mounted to a temporary directory
+ // to satisfy these requirements.
+ root := filepath.Join(c.root, "criu-root")
+ if err := os.Mkdir(root, 0755); err != nil {
+ return err
+ }
+ defer os.Remove(root)
+ root, err = filepath.EvalSymlinks(root)
+ if err != nil {
+ return err
+ }
+ err = unix.Mount(c.config.Rootfs, root, "", unix.MS_BIND|unix.MS_REC, "")
+ if err != nil {
+ return err
+ }
+ defer unix.Unmount(root, unix.MNT_DETACH)
+ t := criurpc.CriuReqType_RESTORE
+ req := &criurpc.CriuReq{
+ Type: &t,
+ Opts: &criurpc.CriuOpts{
+ ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
+ WorkDirFd: proto.Int32(int32(workDir.Fd())),
+ EvasiveDevices: proto.Bool(true),
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("restore.log"),
+ RstSibling: proto.Bool(true),
+ Root: proto.String(root),
+ ManageCgroups: proto.Bool(true),
+ NotifyScripts: proto.Bool(true),
+ ShellJob: proto.Bool(criuOpts.ShellJob),
+ ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
+ TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
+ FileLocks: proto.Bool(criuOpts.FileLocks),
+ EmptyNs: proto.Uint32(criuOpts.EmptyNs),
+ OrphanPtsMaster: proto.Bool(true),
+ },
+ }
+
+ for _, m := range c.config.Mounts {
+ switch m.Device {
+ case "bind":
+ c.addCriuRestoreMount(req, m)
+ case "cgroup":
+ binds, err := getCgroupMounts(m)
+ if err != nil {
+ return err
+ }
+ for _, b := range binds {
+ c.addCriuRestoreMount(req, b)
+ }
+ }
+ }
+
+ if len(c.config.MaskPaths) > 0 {
+ m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"}
+ c.addCriuRestoreMount(req, m)
+ }
+
+ for _, node := range c.config.Devices {
+ m := &configs.Mount{Destination: node.Path, Source: node.Path}
+ c.addCriuRestoreMount(req, m)
+ }
+
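+	// criuOpts.EmptyNs is a mask of CLONE_NEW* flags for namespaces whose
+	// properties should not be checkpointed/restored; only restore the
+	// network configuration when the network namespace is not excluded.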
+ if criuOpts.EmptyNs&unix.CLONE_NEWNET == 0 {
+ c.restoreNetwork(req, criuOpts)
+ }
+
+ // append optional manage cgroups mode
+ if criuOpts.ManageCgroupsMode != 0 {
+ // criu 1.7 => 10700
+ if err := c.checkCriuVersion(10700); err != nil {
+ return err
+ }
+ mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
+ req.Opts.ManageCgroupsMode = &mode
+ }
+
+ var (
+ fds []string
+ fdJSON []byte
+ )
+ if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil {
+ return err
+ }
+
+ if err := json.Unmarshal(fdJSON, &fds); err != nil {
+ return err
+ }
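+	// Descriptors recorded as pipes at checkpoint time are handed back to
+	// CRIU as inherited fds, keyed by their original "pipe:[inode]" name
+	// and indexed by their position in the descriptor list.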
+ for i := range fds {
+ if s := fds[i]; strings.Contains(s, "pipe:") {
+ inheritFd := new(criurpc.InheritFd)
+ inheritFd.Key = proto.String(s)
+ inheritFd.Fd = proto.Int32(int32(i))
+ req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
+ }
+ }
+ return c.criuSwrk(process, req, criuOpts, true)
+}
+
+func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
+ // XXX: Do we need to deal with this case? AFAIK criu still requires root.
+ if err := c.cgroupManager.Apply(pid); err != nil {
+ return err
+ }
+
+ if err := c.cgroupManager.Set(c.config); err != nil {
+ return newSystemError(err)
+ }
+
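+	// Each line of /proc/<pid>/cgroup has the form
+	// "hierarchy-id:controller-list:cgroup-path", e.g. "4:memory:/mycg";
+	// every controller/path pair is handed to CRIU as a cgroup root.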
+ path := fmt.Sprintf("/proc/%d/cgroup", pid)
+ cgroupsPaths, err := cgroups.ParseCgroupFile(path)
+ if err != nil {
+ return err
+ }
+
+ for c, p := range cgroupsPaths {
+ cgroupRoot := &criurpc.CgroupRoot{
+ Ctrl: proto.String(c),
+ Path: proto.String(p),
+ }
+ req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot)
+ }
+
+ return nil
+}
+
+func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool) error {
+ fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return err
+ }
+
+ var logPath string
+ if opts != nil {
+ logPath = filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile())
+ } else {
+ // For the VERSION RPC 'opts' is set to 'nil' and therefore
+ // opts.WorkDirectory does not exist. Set logPath to "".
+ logPath = ""
+ }
+ criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client")
+ criuClientFileCon, err := net.FileConn(criuClient)
+ criuClient.Close()
+ if err != nil {
+ return err
+ }
+
+ criuClientCon := criuClientFileCon.(*net.UnixConn)
+ defer criuClientCon.Close()
+
+ criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server")
+ defer criuServer.Close()
+
+ args := []string{"swrk", "3"}
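+	// "swrk" runs CRIU as an RPC service worker; "3" is the fd it uses
+	// for the RPC socket. criuServer is appended to ExtraFiles below,
+	// and entry i of ExtraFiles becomes fd 3+i in the child process.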
+ if c.criuVersion != 0 {
+		// If the CRIU version is still 0, this is probably the initial
+		// CRIU run to detect the version, so skip logging it.
+ logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath)
+ }
+ logrus.Debugf("Using CRIU with following args: %s", args)
+ cmd := exec.Command(c.criuPath, args...)
+ if process != nil {
+ cmd.Stdin = process.Stdin
+ cmd.Stdout = process.Stdout
+ cmd.Stderr = process.Stderr
+ }
+ cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer)
+
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ criuServer.Close()
+
+ defer func() {
+ criuClientCon.Close()
+ _, err := cmd.Process.Wait()
+ if err != nil {
+ return
+ }
+ }()
+
+ if applyCgroups {
+ err := c.criuApplyCgroups(cmd.Process.Pid, req)
+ if err != nil {
+ return err
+ }
+ }
+
+ var extFds []string
+ if process != nil {
+ extFds, err = getPipeFds(cmd.Process.Pid)
+ if err != nil {
+ return err
+ }
+ }
+
+ logrus.Debugf("Using CRIU in %s mode", req.GetType().String())
+	// For criurpc.CriuReqType_FEATURE_CHECK, req.GetOpts() should be
+	// empty. For older CRIU versions it will still be present, but
+	// empty. criurpc.CriuReqType_VERSION has no req.GetOpts() at all.
+ if !(req.GetType() == criurpc.CriuReqType_FEATURE_CHECK ||
+ req.GetType() == criurpc.CriuReqType_VERSION) {
+
+ val := reflect.ValueOf(req.GetOpts())
+ v := reflect.Indirect(val)
+ for i := 0; i < v.NumField(); i++ {
+ st := v.Type()
+ name := st.Field(i).Name
+ if strings.HasPrefix(name, "XXX_") {
+ continue
+ }
+ value := val.MethodByName("Get" + name).Call([]reflect.Value{})
+ logrus.Debugf("CRIU option %s with value %v", name, value[0])
+ }
+ }
+ data, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+ _, err = criuClientCon.Write(data)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, 10*4096)
+ oob := make([]byte, 4096)
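+	// Response loop: NOTIFY messages are acknowledged and the loop
+	// continues; any other recognized response type ends it.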
+	for {
+ n, oobn, _, _, err := criuClientCon.ReadMsgUnix(buf, oob)
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return fmt.Errorf("unexpected EOF")
+ }
+ if n == len(buf) {
+ return fmt.Errorf("buffer is too small")
+ }
+
+ resp := new(criurpc.CriuResp)
+ err = proto.Unmarshal(buf[:n], resp)
+ if err != nil {
+ return err
+ }
+ if !resp.GetSuccess() {
+ typeString := req.GetType().String()
+ if typeString == "VERSION" {
+ // If the VERSION RPC fails this probably means that the CRIU
+ // version is too old for this RPC. Just return 'nil'.
+ return nil
+ }
+ return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath)
+ }
+
+ t := resp.GetType()
+ switch {
+ case t == criurpc.CriuReqType_VERSION:
+ logrus.Debugf("CRIU version: %s", resp)
+ criuVersionRPC = resp.GetVersion()
+ case t == criurpc.CriuReqType_FEATURE_CHECK:
+ logrus.Debugf("Feature check says: %s", resp)
+ criuFeatures = resp.GetFeatures()
+ case t == criurpc.CriuReqType_NOTIFY:
+ if err := c.criuNotifications(resp, process, opts, extFds, oob[:oobn]); err != nil {
+ return err
+ }
+ t = criurpc.CriuReqType_NOTIFY
+ req = &criurpc.CriuReq{
+ Type: &t,
+ NotifySuccess: proto.Bool(true),
+ }
+ data, err = proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+ _, err = criuClientCon.Write(data)
+ if err != nil {
+ return err
+ }
+ continue
+ case t == criurpc.CriuReqType_RESTORE:
+ case t == criurpc.CriuReqType_DUMP:
+ case t == criurpc.CriuReqType_PRE_DUMP:
+ default:
+ return fmt.Errorf("unable to parse the response %s", resp.String())
+ }
+
+ break
+ }
+
+ criuClientCon.CloseWrite()
+	// cmd.Wait() also waits for cmd's goroutines, which are used for
+	// proxying file descriptors. Here we only want to wait for the CRIU
+	// process itself.
+ st, err := cmd.Process.Wait()
+ if err != nil {
+ return err
+ }
+
+	// In pre-dump mode CRIU runs in a loop and waits for
+	// the final DUMP command.
+	// The current runc pre-dump approach, however, is to
+	// start CRIU in PRE_DUMP mode once for a single pre-dump,
+	// not for the whole series of pre-dump, pre-dump, ..., dump.
+	// If we got the CriuReqType_PRE_DUMP message, it means
+	// CRIU was successful and we need to stop it forcefully.
+ if !st.Success() && *req.Type != criurpc.CriuReqType_PRE_DUMP {
+ return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath)
+ }
+ return nil
+}
+
+// block any external network activity
+func lockNetwork(config *configs.Config) error {
+ for _, config := range config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+
+ if err := strategy.detach(config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func unlockNetwork(config *configs.Config) error {
+ for _, config := range config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+ if err = strategy.attach(config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string, oob []byte) error {
+ notify := resp.GetNotify()
+ if notify == nil {
+ return fmt.Errorf("invalid response: %s", resp.String())
+ }
+ logrus.Debugf("notify: %s\n", notify.GetScript())
+ switch {
+ case notify.GetScript() == "post-dump":
+ f, err := os.Create(filepath.Join(c.root, "checkpoint"))
+ if err != nil {
+ return err
+ }
+ f.Close()
+ case notify.GetScript() == "network-unlock":
+ if err := unlockNetwork(c.config); err != nil {
+ return err
+ }
+ case notify.GetScript() == "network-lock":
+ if err := lockNetwork(c.config); err != nil {
+ return err
+ }
+ case notify.GetScript() == "setup-namespaces":
+ if c.config.Hooks != nil {
+ s := configs.HookState{
+ Version: c.config.Version,
+ ID: c.id,
+ Pid: int(notify.GetPid()),
+ Bundle: utils.SearchLabels(c.config.Labels, "bundle"),
+ }
+ for i, hook := range c.config.Hooks.Prestart {
+ if err := hook.Run(s); err != nil {
+ return newSystemErrorWithCausef(err, "running prestart hook %d", i)
+ }
+ }
+ }
+ case notify.GetScript() == "post-restore":
+ pid := notify.GetPid()
+ r, err := newRestoredProcess(int(pid), fds)
+ if err != nil {
+ return err
+ }
+ process.ops = r
+ if err := c.state.transition(&restoredState{
+ imageDir: opts.ImagesDirectory,
+ c: c,
+ }); err != nil {
+ return err
+ }
+ // create a timestamp indicating when the restored checkpoint was started
+ c.created = time.Now().UTC()
+ if _, err := c.updateState(r); err != nil {
+ return err
+ }
+ if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil {
+ if !os.IsNotExist(err) {
+ logrus.Error(err)
+ }
+ }
+ case notify.GetScript() == "orphan-pts-master":
+ scm, err := unix.ParseSocketControlMessage(oob)
+ if err != nil {
+ return err
+ }
+ fds, err := unix.ParseUnixRights(&scm[0])
+ if err != nil {
+ return err
+ }
+
+ master := os.NewFile(uintptr(fds[0]), "orphan-pts-master")
+ defer master.Close()
+
+		// While we could access console.master directly, going through
+		// the API is preferable.
+ if err := utils.SendFd(process.ConsoleSocket, master); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *linuxContainer) updateState(process parentProcess) (*State, error) {
+ c.initProcess = process
+ state, err := c.currentState()
+ if err != nil {
+ return nil, err
+ }
+ err = c.saveState(state)
+ if err != nil {
+ return nil, err
+ }
+ return state, nil
+}
+
+func (c *linuxContainer) saveState(s *State) error {
+ f, err := os.Create(filepath.Join(c.root, stateFilename))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return utils.WriteJSON(f, s)
+}
+
+func (c *linuxContainer) deleteState() error {
+ return os.Remove(filepath.Join(c.root, stateFilename))
+}
+
+func (c *linuxContainer) currentStatus() (Status, error) {
+ if err := c.refreshState(); err != nil {
+ return -1, err
+ }
+ return c.state.status(), nil
+}
+
+// refreshState needs to be called to verify that the container's current
+// state matches reality. Because consumers of libcontainer can use it
+// out of process, we need to verify the container's status based on
+// runtime information rather than relying on our in-process info.
+func (c *linuxContainer) refreshState() error {
+ paused, err := c.isPaused()
+ if err != nil {
+ return err
+ }
+ if paused {
+ return c.state.transition(&pausedState{c: c})
+ }
+ t, err := c.runType()
+ if err != nil {
+ return err
+ }
+ switch t {
+ case Created:
+ return c.state.transition(&createdState{c: c})
+ case Running:
+ return c.state.transition(&runningState{c: c})
+ }
+ return c.state.transition(&stoppedState{c: c})
+}
+
+func (c *linuxContainer) runType() (Status, error) {
+ if c.initProcess == nil {
+ return Stopped, nil
+ }
+ pid := c.initProcess.pid()
+ stat, err := system.Stat(pid)
+ if err != nil {
+ return Stopped, nil
+ }
+ if stat.StartTime != c.initProcessStartTime || stat.State == system.Zombie || stat.State == system.Dead {
+ return Stopped, nil
+ }
+	// We create the exec fifo and block on it after the container is
+	// created, and delete it once the container has started.
+ if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil {
+ return Created, nil
+ }
+ return Running, nil
+}
+
+func (c *linuxContainer) isPaused() (bool, error) {
+ fcg := c.cgroupManager.GetPaths()["freezer"]
+ if fcg == "" {
+ // A container doesn't have a freezer cgroup
+ return false, nil
+ }
+ data, err := ioutil.ReadFile(filepath.Join(fcg, "freezer.state"))
+ if err != nil {
+		// If the freezer cgroup is not mounted, the container is simply not paused.
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, newSystemErrorWithCause(err, "checking if container is paused")
+ }
+ return bytes.Equal(bytes.TrimSpace(data), []byte("FROZEN")), nil
+}
+
+func (c *linuxContainer) currentState() (*State, error) {
+ var (
+ startTime uint64
+ externalDescriptors []string
+ pid = -1
+ )
+ if c.initProcess != nil {
+ pid = c.initProcess.pid()
+ startTime, _ = c.initProcess.startTime()
+ externalDescriptors = c.initProcess.externalDescriptors()
+ }
+ state := &State{
+ BaseState: BaseState{
+ ID: c.ID(),
+ Config: *c.config,
+ InitProcessPid: pid,
+ InitProcessStartTime: startTime,
+ Created: c.created,
+ },
+ Rootless: c.config.Rootless,
+ CgroupPaths: c.cgroupManager.GetPaths(),
+ NamespacePaths: make(map[configs.NamespaceType]string),
+ ExternalDescriptors: externalDescriptors,
+ }
+ if pid > 0 {
+ for _, ns := range c.config.Namespaces {
+ state.NamespacePaths[ns.Type] = ns.GetPath(pid)
+ }
+ for _, nsType := range configs.NamespaceTypes() {
+ if !configs.IsNamespaceSupported(nsType) {
+ continue
+ }
+ if _, ok := state.NamespacePaths[nsType]; !ok {
+ ns := configs.Namespace{Type: nsType}
+ state.NamespacePaths[ns.Type] = ns.GetPath(pid)
+ }
+ }
+ }
+ return state, nil
+}
+
+// orderNamespacePaths sorts the given namespace paths into a list that
+// can be passed to setns calls in the right order.
+func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) {
+ paths := []string{}
+
+ for _, ns := range configs.NamespaceTypes() {
+
+		// Skip namespaces that we don't need to join.
+ if !c.config.Namespaces.Contains(ns) {
+ continue
+ }
+
+ if p, ok := namespaces[ns]; ok && p != "" {
+ // check if the requested namespace is supported
+ if !configs.IsNamespaceSupported(ns) {
+ return nil, newSystemError(fmt.Errorf("namespace %s is not supported", ns))
+ }
+ // only set to join this namespace if it exists
+ if _, err := os.Lstat(p); err != nil {
+ return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p)
+ }
+			// do not allow a namespace path containing a comma, as we use
+			// commas to separate the namespace paths
+ if strings.ContainsRune(p, ',') {
+ return nil, newSystemError(fmt.Errorf("invalid path %s", p))
+ }
+ paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(ns), p))
+ }
+
+ }
+
+ return paths, nil
+}
+
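+// encodeIDMapping renders each mapping in the /proc/<pid>/{uid,gid}_map
+// format: "<container-id> <host-id> <size>", e.g. "0 1000 65536".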
+func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) {
+ data := bytes.NewBuffer(nil)
+ for _, im := range idMap {
+ line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size)
+ if _, err := data.WriteString(line); err != nil {
+ return nil, err
+ }
+ }
+ return data.Bytes(), nil
+}
+
+// bootstrapData encodes the necessary data in netlink binary format
+// as an io.Reader.
+// Consumers can write the data to a bootstrap program
+// such as one that uses the nsenter package to bootstrap the container's
+// init process correctly, i.e. with the correct namespaces, uid/gid
+// mappings, etc.
+func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) {
+ // create the netlink message
+ r := nl.NewNetlinkRequest(int(InitMsg), 0)
+
+ // write cloneFlags
+ r.AddData(&Int32msg{
+ Type: CloneFlagsAttr,
+ Value: uint32(cloneFlags),
+ })
+
+ // write custom namespace paths
+ if len(nsMaps) > 0 {
+ nsPaths, err := c.orderNamespacePaths(nsMaps)
+ if err != nil {
+ return nil, err
+ }
+ r.AddData(&Bytemsg{
+ Type: NsPathsAttr,
+ Value: []byte(strings.Join(nsPaths, ",")),
+ })
+ }
+
+	// only write uid/gid mappings when we are not joining an existing user ns
+ _, joinExistingUser := nsMaps[configs.NEWUSER]
+ if !joinExistingUser {
+ // write uid mappings
+ if len(c.config.UidMappings) > 0 {
+ b, err := encodeIDMapping(c.config.UidMappings)
+ if err != nil {
+ return nil, err
+ }
+ r.AddData(&Bytemsg{
+ Type: UidmapAttr,
+ Value: b,
+ })
+ }
+
+ // write gid mappings
+ if len(c.config.GidMappings) > 0 {
+ b, err := encodeIDMapping(c.config.GidMappings)
+ if err != nil {
+ return nil, err
+ }
+ r.AddData(&Bytemsg{
+ Type: GidmapAttr,
+ Value: b,
+ })
+ // The following only applies if we are root.
+ if !c.config.Rootless {
+ // check if we have CAP_SETGID to setgroup properly
+ pid, err := capability.NewPid(os.Getpid())
+ if err != nil {
+ return nil, err
+ }
+ if !pid.Get(capability.EFFECTIVE, capability.CAP_SETGID) {
+ r.AddData(&Boolmsg{
+ Type: SetgroupAttr,
+ Value: true,
+ })
+ }
+ }
+ }
+ }
+
+ // write oom_score_adj
+ r.AddData(&Bytemsg{
+ Type: OomScoreAdjAttr,
+ Value: []byte(fmt.Sprintf("%d", c.config.OomScoreAdj)),
+ })
+
+ // write rootless
+ r.AddData(&Boolmsg{
+ Type: RootlessAttr,
+ Value: c.config.Rootless,
+ })
+
+ return bytes.NewReader(r.Serialize()), nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go
new file mode 100644
index 000000000..bb84ff740
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go
@@ -0,0 +1,20 @@
+package libcontainer
+
+// State represents a running container's state
+type State struct {
+ BaseState
+
+ // Platform specific fields below here
+}
+
+// A libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found.
+type Container interface {
+ BaseContainer
+
+ // Methods below here are platform specific
+
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go
new file mode 100644
index 000000000..bb84ff740
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go
@@ -0,0 +1,20 @@
+package libcontainer
+
+// State represents a running container's state
+type State struct {
+ BaseState
+
+ // Platform specific fields below here
+}
+
+// A libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found.
+type Container interface {
+ BaseContainer
+
+ // Methods below here are platform specific
+
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go
new file mode 100644
index 000000000..9423d2464
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go
@@ -0,0 +1,37 @@
+package libcontainer
+
+// cgroup restore strategy provided by criu
+type cgMode uint32
+
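+// The values line up with criurpc.CriuCgMode (SOFT=3, FULL=4, STRICT=5,
+// DEFAULT=6), hence the iota offset of 3.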
+const (
+	CRIU_CG_MODE_SOFT    cgMode = 3 + iota // restore cgroup properties only if the directory was created by criu
+	CRIU_CG_MODE_FULL                      // always restore all cgroups and their properties
+	CRIU_CG_MODE_STRICT                    // restore all, requiring them not to be present in the system
+	CRIU_CG_MODE_DEFAULT                   // the same as CRIU_CG_MODE_SOFT
+)
+
+type CriuPageServerInfo struct {
+ Address string // IP address of CRIU page server
+ Port int32 // port number of CRIU page server
+}
+
+type VethPairName struct {
+ ContainerInterfaceName string
+ HostInterfaceName string
+}
+
+type CriuOpts struct {
+ ImagesDirectory string // directory for storing image files
+ WorkDirectory string // directory to cd and write logs/pidfiles/stats to
+	ParentImage             string             // directory for storing parent image files in pre-dump and dump
+ LeaveRunning bool // leave container in running state after checkpoint
+ TcpEstablished bool // checkpoint/restore established TCP connections
+ ExternalUnixConnections bool // allow external unix connections
+	ShellJob                bool               // allow dumping and restoring shell jobs
+	FileLocks               bool               // handle file locks, for safety
+	PreDump                 bool               // call criu pre-dump to perform an iterative checkpoint
+	PageServer              CriuPageServerInfo // allow dumping to a criu page server
+	VethPairs               []VethPairName     // veth pairs to pass to criu on restore
+ ManageCgroupsMode cgMode // dump or restore cgroup mode
+ EmptyNs uint32 // don't c/r properties for namespace from this mask
+}
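+
+// A minimal usage sketch (illustrative only; the paths are examples and
+// the struct is ultimately passed to the container's checkpoint/restore
+// methods):
+//
+//	opts := &CriuOpts{
+//		ImagesDirectory: "/tmp/checkpoint",
+//		WorkDirectory:   "/tmp/criu-work",
+//	}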
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
new file mode 100644
index 000000000..bc9207703
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
@@ -0,0 +1,6 @@
+package libcontainer
+
+// TODO Windows: This can ultimately be entirely factored out as criu is
+// a Unix concept not relevant on Windows.
+type CriuOpts struct {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go b/vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go
new file mode 100644
index 000000000..21af9db97
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go
@@ -0,0 +1,1178 @@
+// Code generated by protoc-gen-go.
+// source: criurpc.proto
+// DO NOT EDIT!
+
+/*
+Package criurpc is a generated protocol buffer package.
+
+It is generated from these files:
+ criurpc.proto
+
+It has these top-level messages:
+ CriuPageServerInfo
+ CriuVethPair
+ ExtMountMap
+ JoinNamespace
+ InheritFd
+ CgroupRoot
+ UnixSk
+ CriuOpts
+ CriuDumpResp
+ CriuRestoreResp
+ CriuNotify
+ CriuFeatures
+ CriuReq
+ CriuResp
+ CriuVersion
+*/
+package criurpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type CriuCgMode int32
+
+const (
+ CriuCgMode_IGNORE CriuCgMode = 0
+ CriuCgMode_CG_NONE CriuCgMode = 1
+ CriuCgMode_PROPS CriuCgMode = 2
+ CriuCgMode_SOFT CriuCgMode = 3
+ CriuCgMode_FULL CriuCgMode = 4
+ CriuCgMode_STRICT CriuCgMode = 5
+ CriuCgMode_DEFAULT CriuCgMode = 6
+)
+
+var CriuCgMode_name = map[int32]string{
+ 0: "IGNORE",
+ 1: "CG_NONE",
+ 2: "PROPS",
+ 3: "SOFT",
+ 4: "FULL",
+ 5: "STRICT",
+ 6: "DEFAULT",
+}
+var CriuCgMode_value = map[string]int32{
+ "IGNORE": 0,
+ "CG_NONE": 1,
+ "PROPS": 2,
+ "SOFT": 3,
+ "FULL": 4,
+ "STRICT": 5,
+ "DEFAULT": 6,
+}
+
+func (x CriuCgMode) Enum() *CriuCgMode {
+ p := new(CriuCgMode)
+ *p = x
+ return p
+}
+func (x CriuCgMode) String() string {
+ return proto.EnumName(CriuCgMode_name, int32(x))
+}
+func (x *CriuCgMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CriuCgMode_value, data, "CriuCgMode")
+ if err != nil {
+ return err
+ }
+ *x = CriuCgMode(value)
+ return nil
+}
+func (CriuCgMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type CriuReqType int32
+
+const (
+ CriuReqType_EMPTY CriuReqType = 0
+ CriuReqType_DUMP CriuReqType = 1
+ CriuReqType_RESTORE CriuReqType = 2
+ CriuReqType_CHECK CriuReqType = 3
+ CriuReqType_PRE_DUMP CriuReqType = 4
+ CriuReqType_PAGE_SERVER CriuReqType = 5
+ CriuReqType_NOTIFY CriuReqType = 6
+ CriuReqType_CPUINFO_DUMP CriuReqType = 7
+ CriuReqType_CPUINFO_CHECK CriuReqType = 8
+ CriuReqType_FEATURE_CHECK CriuReqType = 9
+ CriuReqType_VERSION CriuReqType = 10
+)
+
+var CriuReqType_name = map[int32]string{
+ 0: "EMPTY",
+ 1: "DUMP",
+ 2: "RESTORE",
+ 3: "CHECK",
+ 4: "PRE_DUMP",
+ 5: "PAGE_SERVER",
+ 6: "NOTIFY",
+ 7: "CPUINFO_DUMP",
+ 8: "CPUINFO_CHECK",
+ 9: "FEATURE_CHECK",
+ 10: "VERSION",
+}
+var CriuReqType_value = map[string]int32{
+ "EMPTY": 0,
+ "DUMP": 1,
+ "RESTORE": 2,
+ "CHECK": 3,
+ "PRE_DUMP": 4,
+ "PAGE_SERVER": 5,
+ "NOTIFY": 6,
+ "CPUINFO_DUMP": 7,
+ "CPUINFO_CHECK": 8,
+ "FEATURE_CHECK": 9,
+ "VERSION": 10,
+}
+
+func (x CriuReqType) Enum() *CriuReqType {
+ p := new(CriuReqType)
+ *p = x
+ return p
+}
+func (x CriuReqType) String() string {
+ return proto.EnumName(CriuReqType_name, int32(x))
+}
+func (x *CriuReqType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CriuReqType_value, data, "CriuReqType")
+ if err != nil {
+ return err
+ }
+ *x = CriuReqType(value)
+ return nil
+}
+func (CriuReqType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+type CriuPageServerInfo struct {
+ Address *string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
+ Pid *int32 `protobuf:"varint,3,opt,name=pid" json:"pid,omitempty"`
+ Fd *int32 `protobuf:"varint,4,opt,name=fd" json:"fd,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuPageServerInfo) Reset() { *m = CriuPageServerInfo{} }
+func (m *CriuPageServerInfo) String() string { return proto.CompactTextString(m) }
+func (*CriuPageServerInfo) ProtoMessage() {}
+func (*CriuPageServerInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *CriuPageServerInfo) GetAddress() string {
+ if m != nil && m.Address != nil {
+ return *m.Address
+ }
+ return ""
+}
+
+func (m *CriuPageServerInfo) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return 0
+}
+
+func (m *CriuPageServerInfo) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+func (m *CriuPageServerInfo) GetFd() int32 {
+ if m != nil && m.Fd != nil {
+ return *m.Fd
+ }
+ return 0
+}
+
+type CriuVethPair struct {
+ IfIn *string `protobuf:"bytes,1,req,name=if_in,json=ifIn" json:"if_in,omitempty"`
+ IfOut *string `protobuf:"bytes,2,req,name=if_out,json=ifOut" json:"if_out,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuVethPair) Reset() { *m = CriuVethPair{} }
+func (m *CriuVethPair) String() string { return proto.CompactTextString(m) }
+func (*CriuVethPair) ProtoMessage() {}
+func (*CriuVethPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *CriuVethPair) GetIfIn() string {
+ if m != nil && m.IfIn != nil {
+ return *m.IfIn
+ }
+ return ""
+}
+
+func (m *CriuVethPair) GetIfOut() string {
+ if m != nil && m.IfOut != nil {
+ return *m.IfOut
+ }
+ return ""
+}
+
+type ExtMountMap struct {
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Val *string `protobuf:"bytes,2,req,name=val" json:"val,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ExtMountMap) Reset() { *m = ExtMountMap{} }
+func (m *ExtMountMap) String() string { return proto.CompactTextString(m) }
+func (*ExtMountMap) ProtoMessage() {}
+func (*ExtMountMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *ExtMountMap) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *ExtMountMap) GetVal() string {
+ if m != nil && m.Val != nil {
+ return *m.Val
+ }
+ return ""
+}
+
+type JoinNamespace struct {
+ Ns *string `protobuf:"bytes,1,req,name=ns" json:"ns,omitempty"`
+ NsFile *string `protobuf:"bytes,2,req,name=ns_file,json=nsFile" json:"ns_file,omitempty"`
+ ExtraOpt *string `protobuf:"bytes,3,opt,name=extra_opt,json=extraOpt" json:"extra_opt,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *JoinNamespace) Reset() { *m = JoinNamespace{} }
+func (m *JoinNamespace) String() string { return proto.CompactTextString(m) }
+func (*JoinNamespace) ProtoMessage() {}
+func (*JoinNamespace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *JoinNamespace) GetNs() string {
+ if m != nil && m.Ns != nil {
+ return *m.Ns
+ }
+ return ""
+}
+
+func (m *JoinNamespace) GetNsFile() string {
+ if m != nil && m.NsFile != nil {
+ return *m.NsFile
+ }
+ return ""
+}
+
+func (m *JoinNamespace) GetExtraOpt() string {
+ if m != nil && m.ExtraOpt != nil {
+ return *m.ExtraOpt
+ }
+ return ""
+}
+
+type InheritFd struct {
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Fd *int32 `protobuf:"varint,2,req,name=fd" json:"fd,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InheritFd) Reset() { *m = InheritFd{} }
+func (m *InheritFd) String() string { return proto.CompactTextString(m) }
+func (*InheritFd) ProtoMessage() {}
+func (*InheritFd) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *InheritFd) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *InheritFd) GetFd() int32 {
+ if m != nil && m.Fd != nil {
+ return *m.Fd
+ }
+ return 0
+}
+
+type CgroupRoot struct {
+ Ctrl *string `protobuf:"bytes,1,opt,name=ctrl" json:"ctrl,omitempty"`
+ Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CgroupRoot) Reset() { *m = CgroupRoot{} }
+func (m *CgroupRoot) String() string { return proto.CompactTextString(m) }
+func (*CgroupRoot) ProtoMessage() {}
+func (*CgroupRoot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *CgroupRoot) GetCtrl() string {
+ if m != nil && m.Ctrl != nil {
+ return *m.Ctrl
+ }
+ return ""
+}
+
+func (m *CgroupRoot) GetPath() string {
+ if m != nil && m.Path != nil {
+ return *m.Path
+ }
+ return ""
+}
+
+type UnixSk struct {
+ Inode *uint32 `protobuf:"varint,1,req,name=inode" json:"inode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UnixSk) Reset() { *m = UnixSk{} }
+func (m *UnixSk) String() string { return proto.CompactTextString(m) }
+func (*UnixSk) ProtoMessage() {}
+func (*UnixSk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *UnixSk) GetInode() uint32 {
+ if m != nil && m.Inode != nil {
+ return *m.Inode
+ }
+ return 0
+}
+
+type CriuOpts struct {
+ ImagesDirFd *int32 `protobuf:"varint,1,req,name=images_dir_fd,json=imagesDirFd" json:"images_dir_fd,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+ LeaveRunning *bool `protobuf:"varint,3,opt,name=leave_running,json=leaveRunning" json:"leave_running,omitempty"`
+ ExtUnixSk *bool `protobuf:"varint,4,opt,name=ext_unix_sk,json=extUnixSk" json:"ext_unix_sk,omitempty"`
+ TcpEstablished *bool `protobuf:"varint,5,opt,name=tcp_established,json=tcpEstablished" json:"tcp_established,omitempty"`
+ EvasiveDevices *bool `protobuf:"varint,6,opt,name=evasive_devices,json=evasiveDevices" json:"evasive_devices,omitempty"`
+ ShellJob *bool `protobuf:"varint,7,opt,name=shell_job,json=shellJob" json:"shell_job,omitempty"`
+ FileLocks *bool `protobuf:"varint,8,opt,name=file_locks,json=fileLocks" json:"file_locks,omitempty"`
+ LogLevel *int32 `protobuf:"varint,9,opt,name=log_level,json=logLevel,def=2" json:"log_level,omitempty"`
+ LogFile *string `protobuf:"bytes,10,opt,name=log_file,json=logFile" json:"log_file,omitempty"`
+ Ps *CriuPageServerInfo `protobuf:"bytes,11,opt,name=ps" json:"ps,omitempty"`
+ NotifyScripts *bool `protobuf:"varint,12,opt,name=notify_scripts,json=notifyScripts" json:"notify_scripts,omitempty"`
+ Root *string `protobuf:"bytes,13,opt,name=root" json:"root,omitempty"`
+ ParentImg *string `protobuf:"bytes,14,opt,name=parent_img,json=parentImg" json:"parent_img,omitempty"`
+ TrackMem *bool `protobuf:"varint,15,opt,name=track_mem,json=trackMem" json:"track_mem,omitempty"`
+ AutoDedup *bool `protobuf:"varint,16,opt,name=auto_dedup,json=autoDedup" json:"auto_dedup,omitempty"`
+ WorkDirFd *int32 `protobuf:"varint,17,opt,name=work_dir_fd,json=workDirFd" json:"work_dir_fd,omitempty"`
+ LinkRemap *bool `protobuf:"varint,18,opt,name=link_remap,json=linkRemap" json:"link_remap,omitempty"`
+ Veths []*CriuVethPair `protobuf:"bytes,19,rep,name=veths" json:"veths,omitempty"`
+ CpuCap *uint32 `protobuf:"varint,20,opt,name=cpu_cap,json=cpuCap,def=4294967295" json:"cpu_cap,omitempty"`
+ ForceIrmap *bool `protobuf:"varint,21,opt,name=force_irmap,json=forceIrmap" json:"force_irmap,omitempty"`
+ ExecCmd []string `protobuf:"bytes,22,rep,name=exec_cmd,json=execCmd" json:"exec_cmd,omitempty"`
+ ExtMnt []*ExtMountMap `protobuf:"bytes,23,rep,name=ext_mnt,json=extMnt" json:"ext_mnt,omitempty"`
+ ManageCgroups *bool `protobuf:"varint,24,opt,name=manage_cgroups,json=manageCgroups" json:"manage_cgroups,omitempty"`
+ CgRoot []*CgroupRoot `protobuf:"bytes,25,rep,name=cg_root,json=cgRoot" json:"cg_root,omitempty"`
+ RstSibling *bool `protobuf:"varint,26,opt,name=rst_sibling,json=rstSibling" json:"rst_sibling,omitempty"`
+ InheritFd []*InheritFd `protobuf:"bytes,27,rep,name=inherit_fd,json=inheritFd" json:"inherit_fd,omitempty"`
+ AutoExtMnt *bool `protobuf:"varint,28,opt,name=auto_ext_mnt,json=autoExtMnt" json:"auto_ext_mnt,omitempty"`
+ ExtSharing *bool `protobuf:"varint,29,opt,name=ext_sharing,json=extSharing" json:"ext_sharing,omitempty"`
+ ExtMasters *bool `protobuf:"varint,30,opt,name=ext_masters,json=extMasters" json:"ext_masters,omitempty"`
+ SkipMnt []string `protobuf:"bytes,31,rep,name=skip_mnt,json=skipMnt" json:"skip_mnt,omitempty"`
+ EnableFs []string `protobuf:"bytes,32,rep,name=enable_fs,json=enableFs" json:"enable_fs,omitempty"`
+ UnixSkIno []*UnixSk `protobuf:"bytes,33,rep,name=unix_sk_ino,json=unixSkIno" json:"unix_sk_ino,omitempty"`
+ ManageCgroupsMode *CriuCgMode `protobuf:"varint,34,opt,name=manage_cgroups_mode,json=manageCgroupsMode,enum=CriuCgMode" json:"manage_cgroups_mode,omitempty"`
+ GhostLimit *uint32 `protobuf:"varint,35,opt,name=ghost_limit,json=ghostLimit,def=1048576" json:"ghost_limit,omitempty"`
+ IrmapScanPaths []string `protobuf:"bytes,36,rep,name=irmap_scan_paths,json=irmapScanPaths" json:"irmap_scan_paths,omitempty"`
+ External []string `protobuf:"bytes,37,rep,name=external" json:"external,omitempty"`
+ EmptyNs *uint32 `protobuf:"varint,38,opt,name=empty_ns,json=emptyNs" json:"empty_ns,omitempty"`
+ JoinNs []*JoinNamespace `protobuf:"bytes,39,rep,name=join_ns,json=joinNs" json:"join_ns,omitempty"`
+ CgroupProps *string `protobuf:"bytes,41,opt,name=cgroup_props,json=cgroupProps" json:"cgroup_props,omitempty"`
+ CgroupPropsFile *string `protobuf:"bytes,42,opt,name=cgroup_props_file,json=cgroupPropsFile" json:"cgroup_props_file,omitempty"`
+ CgroupDumpController []string `protobuf:"bytes,43,rep,name=cgroup_dump_controller,json=cgroupDumpController" json:"cgroup_dump_controller,omitempty"`
+ FreezeCgroup *string `protobuf:"bytes,44,opt,name=freeze_cgroup,json=freezeCgroup" json:"freeze_cgroup,omitempty"`
+ Timeout *uint32 `protobuf:"varint,45,opt,name=timeout" json:"timeout,omitempty"`
+ TcpSkipInFlight *bool `protobuf:"varint,46,opt,name=tcp_skip_in_flight,json=tcpSkipInFlight" json:"tcp_skip_in_flight,omitempty"`
+ WeakSysctls *bool `protobuf:"varint,47,opt,name=weak_sysctls,json=weakSysctls" json:"weak_sysctls,omitempty"`
+ LazyPages *bool `protobuf:"varint,48,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
+ StatusFd *int32 `protobuf:"varint,49,opt,name=status_fd,json=statusFd" json:"status_fd,omitempty"`
+ OrphanPtsMaster *bool `protobuf:"varint,50,opt,name=orphan_pts_master,json=orphanPtsMaster" json:"orphan_pts_master,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuOpts) Reset() { *m = CriuOpts{} }
+func (m *CriuOpts) String() string { return proto.CompactTextString(m) }
+func (*CriuOpts) ProtoMessage() {}
+func (*CriuOpts) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+const Default_CriuOpts_LogLevel int32 = 2
+const Default_CriuOpts_CpuCap uint32 = 4294967295
+const Default_CriuOpts_GhostLimit uint32 = 1048576
+
+func (m *CriuOpts) GetImagesDirFd() int32 {
+ if m != nil && m.ImagesDirFd != nil {
+ return *m.ImagesDirFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetLeaveRunning() bool {
+ if m != nil && m.LeaveRunning != nil {
+ return *m.LeaveRunning
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtUnixSk() bool {
+ if m != nil && m.ExtUnixSk != nil {
+ return *m.ExtUnixSk
+ }
+ return false
+}
+
+func (m *CriuOpts) GetTcpEstablished() bool {
+ if m != nil && m.TcpEstablished != nil {
+ return *m.TcpEstablished
+ }
+ return false
+}
+
+func (m *CriuOpts) GetEvasiveDevices() bool {
+ if m != nil && m.EvasiveDevices != nil {
+ return *m.EvasiveDevices
+ }
+ return false
+}
+
+func (m *CriuOpts) GetShellJob() bool {
+ if m != nil && m.ShellJob != nil {
+ return *m.ShellJob
+ }
+ return false
+}
+
+func (m *CriuOpts) GetFileLocks() bool {
+ if m != nil && m.FileLocks != nil {
+ return *m.FileLocks
+ }
+ return false
+}
+
+func (m *CriuOpts) GetLogLevel() int32 {
+ if m != nil && m.LogLevel != nil {
+ return *m.LogLevel
+ }
+ return Default_CriuOpts_LogLevel
+}
+
+func (m *CriuOpts) GetLogFile() string {
+ if m != nil && m.LogFile != nil {
+ return *m.LogFile
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetPs() *CriuPageServerInfo {
+ if m != nil {
+ return m.Ps
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetNotifyScripts() bool {
+ if m != nil && m.NotifyScripts != nil {
+ return *m.NotifyScripts
+ }
+ return false
+}
+
+func (m *CriuOpts) GetRoot() string {
+ if m != nil && m.Root != nil {
+ return *m.Root
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetParentImg() string {
+ if m != nil && m.ParentImg != nil {
+ return *m.ParentImg
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetTrackMem() bool {
+ if m != nil && m.TrackMem != nil {
+ return *m.TrackMem
+ }
+ return false
+}
+
+func (m *CriuOpts) GetAutoDedup() bool {
+ if m != nil && m.AutoDedup != nil {
+ return *m.AutoDedup
+ }
+ return false
+}
+
+func (m *CriuOpts) GetWorkDirFd() int32 {
+ if m != nil && m.WorkDirFd != nil {
+ return *m.WorkDirFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetLinkRemap() bool {
+ if m != nil && m.LinkRemap != nil {
+ return *m.LinkRemap
+ }
+ return false
+}
+
+func (m *CriuOpts) GetVeths() []*CriuVethPair {
+ if m != nil {
+ return m.Veths
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetCpuCap() uint32 {
+ if m != nil && m.CpuCap != nil {
+ return *m.CpuCap
+ }
+ return Default_CriuOpts_CpuCap
+}
+
+func (m *CriuOpts) GetForceIrmap() bool {
+ if m != nil && m.ForceIrmap != nil {
+ return *m.ForceIrmap
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExecCmd() []string {
+ if m != nil {
+ return m.ExecCmd
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetExtMnt() []*ExtMountMap {
+ if m != nil {
+ return m.ExtMnt
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetManageCgroups() bool {
+ if m != nil && m.ManageCgroups != nil {
+ return *m.ManageCgroups
+ }
+ return false
+}
+
+func (m *CriuOpts) GetCgRoot() []*CgroupRoot {
+ if m != nil {
+ return m.CgRoot
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetRstSibling() bool {
+ if m != nil && m.RstSibling != nil {
+ return *m.RstSibling
+ }
+ return false
+}
+
+func (m *CriuOpts) GetInheritFd() []*InheritFd {
+ if m != nil {
+ return m.InheritFd
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetAutoExtMnt() bool {
+ if m != nil && m.AutoExtMnt != nil {
+ return *m.AutoExtMnt
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtSharing() bool {
+ if m != nil && m.ExtSharing != nil {
+ return *m.ExtSharing
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtMasters() bool {
+ if m != nil && m.ExtMasters != nil {
+ return *m.ExtMasters
+ }
+ return false
+}
+
+func (m *CriuOpts) GetSkipMnt() []string {
+ if m != nil {
+ return m.SkipMnt
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetEnableFs() []string {
+ if m != nil {
+ return m.EnableFs
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetUnixSkIno() []*UnixSk {
+ if m != nil {
+ return m.UnixSkIno
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetManageCgroupsMode() CriuCgMode {
+ if m != nil && m.ManageCgroupsMode != nil {
+ return *m.ManageCgroupsMode
+ }
+ return CriuCgMode_IGNORE
+}
+
+func (m *CriuOpts) GetGhostLimit() uint32 {
+ if m != nil && m.GhostLimit != nil {
+ return *m.GhostLimit
+ }
+ return Default_CriuOpts_GhostLimit
+}
+
+func (m *CriuOpts) GetIrmapScanPaths() []string {
+ if m != nil {
+ return m.IrmapScanPaths
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetExternal() []string {
+ if m != nil {
+ return m.External
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetEmptyNs() uint32 {
+ if m != nil && m.EmptyNs != nil {
+ return *m.EmptyNs
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetJoinNs() []*JoinNamespace {
+ if m != nil {
+ return m.JoinNs
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetCgroupProps() string {
+ if m != nil && m.CgroupProps != nil {
+ return *m.CgroupProps
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetCgroupPropsFile() string {
+ if m != nil && m.CgroupPropsFile != nil {
+ return *m.CgroupPropsFile
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetCgroupDumpController() []string {
+ if m != nil {
+ return m.CgroupDumpController
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetFreezeCgroup() string {
+ if m != nil && m.FreezeCgroup != nil {
+ return *m.FreezeCgroup
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetTimeout() uint32 {
+ if m != nil && m.Timeout != nil {
+ return *m.Timeout
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetTcpSkipInFlight() bool {
+ if m != nil && m.TcpSkipInFlight != nil {
+ return *m.TcpSkipInFlight
+ }
+ return false
+}
+
+func (m *CriuOpts) GetWeakSysctls() bool {
+ if m != nil && m.WeakSysctls != nil {
+ return *m.WeakSysctls
+ }
+ return false
+}
+
+func (m *CriuOpts) GetLazyPages() bool {
+ if m != nil && m.LazyPages != nil {
+ return *m.LazyPages
+ }
+ return false
+}
+
+func (m *CriuOpts) GetStatusFd() int32 {
+ if m != nil && m.StatusFd != nil {
+ return *m.StatusFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetOrphanPtsMaster() bool {
+ if m != nil && m.OrphanPtsMaster != nil {
+ return *m.OrphanPtsMaster
+ }
+ return false
+}
+
+type CriuDumpResp struct {
+ Restored *bool `protobuf:"varint,1,opt,name=restored" json:"restored,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuDumpResp) Reset() { *m = CriuDumpResp{} }
+func (m *CriuDumpResp) String() string { return proto.CompactTextString(m) }
+func (*CriuDumpResp) ProtoMessage() {}
+func (*CriuDumpResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *CriuDumpResp) GetRestored() bool {
+ if m != nil && m.Restored != nil {
+ return *m.Restored
+ }
+ return false
+}
+
+type CriuRestoreResp struct {
+ Pid *int32 `protobuf:"varint,1,req,name=pid" json:"pid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuRestoreResp) Reset() { *m = CriuRestoreResp{} }
+func (m *CriuRestoreResp) String() string { return proto.CompactTextString(m) }
+func (*CriuRestoreResp) ProtoMessage() {}
+func (*CriuRestoreResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *CriuRestoreResp) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+type CriuNotify struct {
+ Script *string `protobuf:"bytes,1,opt,name=script" json:"script,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuNotify) Reset() { *m = CriuNotify{} }
+func (m *CriuNotify) String() string { return proto.CompactTextString(m) }
+func (*CriuNotify) ProtoMessage() {}
+func (*CriuNotify) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *CriuNotify) GetScript() string {
+ if m != nil && m.Script != nil {
+ return *m.Script
+ }
+ return ""
+}
+
+func (m *CriuNotify) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+//
+// List of features which can be queried via
+// CRIU_REQ_TYPE__FEATURE_CHECK
+type CriuFeatures struct {
+ MemTrack *bool `protobuf:"varint,1,opt,name=mem_track,json=memTrack" json:"mem_track,omitempty"`
+ LazyPages *bool `protobuf:"varint,2,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuFeatures) Reset() { *m = CriuFeatures{} }
+func (m *CriuFeatures) String() string { return proto.CompactTextString(m) }
+func (*CriuFeatures) ProtoMessage() {}
+func (*CriuFeatures) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *CriuFeatures) GetMemTrack() bool {
+ if m != nil && m.MemTrack != nil {
+ return *m.MemTrack
+ }
+ return false
+}
+
+func (m *CriuFeatures) GetLazyPages() bool {
+ if m != nil && m.LazyPages != nil {
+ return *m.LazyPages
+ }
+ return false
+}
+
+type CriuReq struct {
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Opts *CriuOpts `protobuf:"bytes,2,opt,name=opts" json:"opts,omitempty"`
+ NotifySuccess *bool `protobuf:"varint,3,opt,name=notify_success,json=notifySuccess" json:"notify_success,omitempty"`
+ //
+	// When set, the service won't close the connection but
+	// will wait for more requests to appear. This does not
+	// work for all request types.
+ KeepOpen *bool `protobuf:"varint,4,opt,name=keep_open,json=keepOpen" json:"keep_open,omitempty"`
+ //
+ // 'features' can be used to query which features
+ // are supported by the installed criu/kernel
+ // via RPC.
+ Features *CriuFeatures `protobuf:"bytes,5,opt,name=features" json:"features,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuReq) Reset() { *m = CriuReq{} }
+func (m *CriuReq) String() string { return proto.CompactTextString(m) }
+func (*CriuReq) ProtoMessage() {}
+func (*CriuReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *CriuReq) GetType() CriuReqType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (m *CriuReq) GetOpts() *CriuOpts {
+ if m != nil {
+ return m.Opts
+ }
+ return nil
+}
+
+func (m *CriuReq) GetNotifySuccess() bool {
+ if m != nil && m.NotifySuccess != nil {
+ return *m.NotifySuccess
+ }
+ return false
+}
+
+func (m *CriuReq) GetKeepOpen() bool {
+ if m != nil && m.KeepOpen != nil {
+ return *m.KeepOpen
+ }
+ return false
+}
+
+func (m *CriuReq) GetFeatures() *CriuFeatures {
+ if m != nil {
+ return m.Features
+ }
+ return nil
+}
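+
+// An illustrative minimal request, mirroring how consumers of this package
+// build one (cf. the RESTORE request in container_linux.go):
+//
+//	t := CriuReqType_VERSION
+//	req := &CriuReq{Type: &t}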
+
+type CriuResp struct {
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Success *bool `protobuf:"varint,2,req,name=success" json:"success,omitempty"`
+ Dump *CriuDumpResp `protobuf:"bytes,3,opt,name=dump" json:"dump,omitempty"`
+ Restore *CriuRestoreResp `protobuf:"bytes,4,opt,name=restore" json:"restore,omitempty"`
+ Notify *CriuNotify `protobuf:"bytes,5,opt,name=notify" json:"notify,omitempty"`
+ Ps *CriuPageServerInfo `protobuf:"bytes,6,opt,name=ps" json:"ps,omitempty"`
+ CrErrno *int32 `protobuf:"varint,7,opt,name=cr_errno,json=crErrno" json:"cr_errno,omitempty"`
+ Features *CriuFeatures `protobuf:"bytes,8,opt,name=features" json:"features,omitempty"`
+ CrErrmsg *string `protobuf:"bytes,9,opt,name=cr_errmsg,json=crErrmsg" json:"cr_errmsg,omitempty"`
+ Version *CriuVersion `protobuf:"bytes,10,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuResp) Reset() { *m = CriuResp{} }
+func (m *CriuResp) String() string { return proto.CompactTextString(m) }
+func (*CriuResp) ProtoMessage() {}
+func (*CriuResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *CriuResp) GetType() CriuReqType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (m *CriuResp) GetSuccess() bool {
+ if m != nil && m.Success != nil {
+ return *m.Success
+ }
+ return false
+}
+
+func (m *CriuResp) GetDump() *CriuDumpResp {
+ if m != nil {
+ return m.Dump
+ }
+ return nil
+}
+
+func (m *CriuResp) GetRestore() *CriuRestoreResp {
+ if m != nil {
+ return m.Restore
+ }
+ return nil
+}
+
+func (m *CriuResp) GetNotify() *CriuNotify {
+ if m != nil {
+ return m.Notify
+ }
+ return nil
+}
+
+func (m *CriuResp) GetPs() *CriuPageServerInfo {
+ if m != nil {
+ return m.Ps
+ }
+ return nil
+}
+
+func (m *CriuResp) GetCrErrno() int32 {
+ if m != nil && m.CrErrno != nil {
+ return *m.CrErrno
+ }
+ return 0
+}
+
+func (m *CriuResp) GetFeatures() *CriuFeatures {
+ if m != nil {
+ return m.Features
+ }
+ return nil
+}
+
+func (m *CriuResp) GetCrErrmsg() string {
+ if m != nil && m.CrErrmsg != nil {
+ return *m.CrErrmsg
+ }
+ return ""
+}
+
+func (m *CriuResp) GetVersion() *CriuVersion {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+// Answer for criu_req_type.VERSION requests
+type CriuVersion struct {
+ Major *int32 `protobuf:"varint,1,req,name=major" json:"major,omitempty"`
+ Minor *int32 `protobuf:"varint,2,req,name=minor" json:"minor,omitempty"`
+ Gitid *string `protobuf:"bytes,3,opt,name=gitid" json:"gitid,omitempty"`
+ Sublevel *int32 `protobuf:"varint,4,opt,name=sublevel" json:"sublevel,omitempty"`
+ Extra *int32 `protobuf:"varint,5,opt,name=extra" json:"extra,omitempty"`
+ Name *string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuVersion) Reset() { *m = CriuVersion{} }
+func (m *CriuVersion) String() string { return proto.CompactTextString(m) }
+func (*CriuVersion) ProtoMessage() {}
+func (*CriuVersion) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *CriuVersion) GetMajor() int32 {
+ if m != nil && m.Major != nil {
+ return *m.Major
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetMinor() int32 {
+ if m != nil && m.Minor != nil {
+ return *m.Minor
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetGitid() string {
+ if m != nil && m.Gitid != nil {
+ return *m.Gitid
+ }
+ return ""
+}
+
+func (m *CriuVersion) GetSublevel() int32 {
+ if m != nil && m.Sublevel != nil {
+ return *m.Sublevel
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetExtra() int32 {
+ if m != nil && m.Extra != nil {
+ return *m.Extra
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*CriuPageServerInfo)(nil), "criu_page_server_info")
+ proto.RegisterType((*CriuVethPair)(nil), "criu_veth_pair")
+ proto.RegisterType((*ExtMountMap)(nil), "ext_mount_map")
+ proto.RegisterType((*JoinNamespace)(nil), "join_namespace")
+ proto.RegisterType((*InheritFd)(nil), "inherit_fd")
+ proto.RegisterType((*CgroupRoot)(nil), "cgroup_root")
+ proto.RegisterType((*UnixSk)(nil), "unix_sk")
+ proto.RegisterType((*CriuOpts)(nil), "criu_opts")
+ proto.RegisterType((*CriuDumpResp)(nil), "criu_dump_resp")
+ proto.RegisterType((*CriuRestoreResp)(nil), "criu_restore_resp")
+ proto.RegisterType((*CriuNotify)(nil), "criu_notify")
+ proto.RegisterType((*CriuFeatures)(nil), "criu_features")
+ proto.RegisterType((*CriuReq)(nil), "criu_req")
+ proto.RegisterType((*CriuResp)(nil), "criu_resp")
+ proto.RegisterType((*CriuVersion)(nil), "criu_version")
+ proto.RegisterEnum("CriuCgMode", CriuCgMode_name, CriuCgMode_value)
+ proto.RegisterEnum("CriuReqType", CriuReqType_name, CriuReqType_value)
+}
+
+func init() { proto.RegisterFile("criurpc.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 1781 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xdd, 0x72, 0x5b, 0xb7,
+ 0x11, 0x0e, 0x29, 0xfe, 0x1c, 0x82, 0x3f, 0xa6, 0x10, 0xdb, 0x81, 0x93, 0xda, 0x62, 0xe8, 0x28,
+ 0x51, 0x15, 0x97, 0x4d, 0x58, 0x3b, 0xae, 0x33, 0xed, 0x85, 0x47, 0x22, 0x5d, 0x36, 0x92, 0xc8,
+ 0x01, 0x25, 0xcf, 0xe4, 0x0a, 0x73, 0x74, 0x0e, 0x48, 0xc1, 0x3c, 0x7f, 0x05, 0x40, 0x45, 0xf2,
+ 0x83, 0xf4, 0x29, 0xfa, 0x0c, 0x7d, 0x84, 0xbe, 0x4e, 0x6f, 0x3b, 0xbb, 0x00, 0x65, 0x29, 0xc9,
+ 0xb4, 0xbd, 0xc3, 0x7e, 0x58, 0x00, 0xbb, 0xfb, 0xed, 0x0f, 0x48, 0x3b, 0xd2, 0x6a, 0xad, 0x8b,
+ 0x68, 0x50, 0xe8, 0xdc, 0xe6, 0xfd, 0x25, 0x79, 0x00, 0x80, 0x28, 0xc2, 0xa5, 0x14, 0x46, 0xea,
+ 0x4b, 0xa9, 0x85, 0xca, 0x16, 0x39, 0x65, 0xa4, 0x1e, 0xc6, 0xb1, 0x96, 0xc6, 0xb0, 0x52, 0xaf,
+ 0xb4, 0xd7, 0xe0, 0x1b, 0x91, 0x52, 0x52, 0x29, 0x72, 0x6d, 0x59, 0xb9, 0x57, 0xda, 0xab, 0x72,
+ 0x5c, 0xd3, 0x2e, 0xd9, 0x2a, 0x54, 0xcc, 0xb6, 0x10, 0x82, 0x25, 0xed, 0x90, 0xf2, 0x22, 0x66,
+ 0x15, 0x04, 0xca, 0x8b, 0xb8, 0xff, 0x27, 0xd2, 0xc1, 0x87, 0x2e, 0xa5, 0xbd, 0x10, 0x45, 0xa8,
+ 0x34, 0xfd, 0x98, 0x54, 0xd5, 0x42, 0xa8, 0x8c, 0x95, 0x7a, 0xe5, 0xbd, 0x06, 0xaf, 0xa8, 0xc5,
+ 0x24, 0xa3, 0x0f, 0x48, 0x4d, 0x2d, 0x44, 0xbe, 0x86, 0xeb, 0x01, 0xad, 0xaa, 0xc5, 0x74, 0x6d,
+ 0xfb, 0x7f, 0x20, 0x6d, 0x79, 0x65, 0x45, 0x9a, 0xaf, 0x33, 0x2b, 0xd2, 0xb0, 0x80, 0x07, 0x57,
+ 0xf2, 0xda, 0x1f, 0x85, 0x25, 0x20, 0x97, 0x61, 0xe2, 0x8f, 0xc1, 0xb2, 0xff, 0x96, 0x74, 0xde,
+ 0xe5, 0x2a, 0x13, 0x59, 0x98, 0x4a, 0x53, 0x84, 0x91, 0x04, 0xa3, 0x32, 0xe3, 0x0f, 0x95, 0x33,
+ 0x43, 0x3f, 0x21, 0xf5, 0xcc, 0x88, 0x85, 0x4a, 0xa4, 0x3f, 0x57, 0xcb, 0xcc, 0x58, 0x25, 0x92,
+ 0x7e, 0x46, 0x1a, 0xf2, 0xca, 0xea, 0x50, 0xe4, 0x85, 0x45, 0xaf, 0x1a, 0x3c, 0x40, 0x60, 0x5a,
+ 0xd8, 0xfe, 0x80, 0x10, 0x95, 0x5d, 0x48, 0xad, 0xac, 0x58, 0xc4, 0xbf, 0x62, 0x89, 0x73, 0x1d,
+ 0x2e, 0x74, 0xae, 0xbf, 0x20, 0xcd, 0x68, 0xa9, 0xf3, 0x75, 0x21, 0x74, 0x9e, 0x5b, 0x88, 0x5f,
+ 0x64, 0x75, 0xe2, 0xc3, 0x8a, 0x6b, 0x8c, 0x69, 0x68, 0x2f, 0xbc, 0x15, 0xb8, 0xee, 0xef, 0x90,
+ 0xfa, 0x3a, 0x53, 0x57, 0xc2, 0xac, 0xe8, 0x7d, 0x52, 0x55, 0x59, 0x1e, 0x4b, 0x7c, 0xa5, 0xcd,
+ 0x9d, 0xd0, 0xff, 0x57, 0x9b, 0x34, 0x30, 0xa6, 0x79, 0x61, 0x0d, 0xed, 0x93, 0xb6, 0x4a, 0xc3,
+ 0xa5, 0x34, 0x22, 0x56, 0x5a, 0x2c, 0x62, 0xd4, 0xad, 0xf2, 0xa6, 0x03, 0x0f, 0x95, 0x1e, 0xc7,
+ 0x1b, 0x9a, 0xca, 0x1f, 0x68, 0x7a, 0x4a, 0xda, 0x89, 0x0c, 0x2f, 0xa5, 0xd0, 0xeb, 0x2c, 0x53,
+ 0xd9, 0x12, 0x9d, 0x0d, 0x78, 0x0b, 0x41, 0xee, 0x30, 0xfa, 0x84, 0x34, 0x21, 0xfa, 0xde, 0x1a,
+ 0x24, 0x35, 0xe0, 0x10, 0xa0, 0xb3, 0x4c, 0x5d, 0xcd, 0x57, 0xf4, 0x2b, 0x72, 0xcf, 0x46, 0x85,
+ 0x90, 0xc6, 0x86, 0xe7, 0x89, 0x32, 0x17, 0x32, 0x66, 0x55, 0xd4, 0xe9, 0xd8, 0xa8, 0x18, 0x7d,
+ 0x40, 0x41, 0x51, 0x5e, 0x86, 0x46, 0x5d, 0x4a, 0x11, 0xcb, 0x4b, 0x15, 0x49, 0xc3, 0x6a, 0x4e,
+ 0xd1, 0xc3, 0x87, 0x0e, 0x85, 0xf8, 0x9b, 0x0b, 0x99, 0x24, 0xe2, 0x5d, 0x7e, 0xce, 0xea, 0xa8,
+ 0x12, 0x20, 0xf0, 0xd7, 0xfc, 0x9c, 0x3e, 0x26, 0x04, 0x28, 0x13, 0x49, 0x1e, 0xad, 0x0c, 0x0b,
+ 0x9c, 0x35, 0x80, 0x1c, 0x01, 0x40, 0x9f, 0x90, 0x46, 0x92, 0x2f, 0x45, 0x22, 0x2f, 0x65, 0xc2,
+ 0x1a, 0xe0, 0xea, 0xf7, 0xa5, 0x21, 0x0f, 0x92, 0x7c, 0x79, 0x04, 0x10, 0x7d, 0x44, 0x60, 0xed,
+ 0x58, 0x27, 0x2e, 0xb5, 0x93, 0x7c, 0x89, 0xb4, 0x7f, 0x49, 0xca, 0x85, 0x61, 0xcd, 0x5e, 0x69,
+ 0xaf, 0x39, 0x7c, 0x38, 0xf8, 0xd5, 0xc2, 0xe0, 0xe5, 0xc2, 0xd0, 0x5d, 0xd2, 0xc9, 0x72, 0xab,
+ 0x16, 0xd7, 0xc2, 0x44, 0x5a, 0x15, 0xd6, 0xb0, 0x16, 0x5a, 0xd1, 0x76, 0xe8, 0xdc, 0x81, 0xc0,
+ 0x2a, 0x30, 0xce, 0xda, 0x8e, 0x69, 0x64, 0xff, 0x31, 0x21, 0x45, 0xa8, 0x65, 0x66, 0x85, 0x4a,
+ 0x97, 0xac, 0x83, 0x3b, 0x0d, 0x87, 0x4c, 0xd2, 0x25, 0x38, 0x6e, 0x75, 0x18, 0xad, 0x44, 0x2a,
+ 0x53, 0x76, 0xcf, 0x39, 0x8e, 0xc0, 0xb1, 0x4c, 0xe1, 0x6c, 0xb8, 0xb6, 0xb9, 0x88, 0x65, 0xbc,
+ 0x2e, 0x58, 0xd7, 0x39, 0x0e, 0xc8, 0x21, 0x00, 0x40, 0xd3, 0x4f, 0xb9, 0x5e, 0x6d, 0xf8, 0xdf,
+ 0x46, 0x96, 0x1b, 0x00, 0x39, 0xf6, 0x1f, 0x13, 0x92, 0xa8, 0x6c, 0x25, 0xb4, 0x4c, 0xc3, 0x82,
+ 0x51, 0x77, 0x1c, 0x10, 0x0e, 0x00, 0xdd, 0x25, 0x55, 0x28, 0x4e, 0xc3, 0x3e, 0xee, 0x6d, 0xed,
+ 0x35, 0x87, 0xf7, 0x06, 0x77, 0xeb, 0x95, 0xbb, 0x5d, 0xfa, 0x94, 0xd4, 0xa3, 0x62, 0x2d, 0xa2,
+ 0xb0, 0x60, 0xf7, 0x7b, 0xa5, 0xbd, 0xf6, 0xf7, 0xe4, 0xf9, 0xf0, 0xd5, 0xf3, 0x57, 0xdf, 0xbd,
+ 0x1c, 0xbe, 0x7a, 0xc1, 0x6b, 0x51, 0xb1, 0x3e, 0x08, 0x0b, 0xba, 0x43, 0x9a, 0x8b, 0x5c, 0x47,
+ 0x52, 0x28, 0x0d, 0x6f, 0x3d, 0xc0, 0xb7, 0x08, 0x42, 0x13, 0x40, 0x80, 0x04, 0x79, 0x25, 0x23,
+ 0x11, 0xa5, 0x31, 0x7b, 0xd8, 0xdb, 0x02, 0x12, 0x40, 0x3e, 0x48, 0x21, 0x49, 0xea, 0x58, 0xeb,
+ 0x99, 0x65, 0x9f, 0xa0, 0x25, 0x9d, 0xc1, 0x9d, 0xda, 0xe7, 0x35, 0x79, 0x65, 0x8f, 0x33, 0x0b,
+ 0x2c, 0xa4, 0x61, 0x06, 0xfc, 0xb8, 0xf2, 0x32, 0x8c, 0x39, 0x16, 0x1c, 0x7a, 0xe0, 0x40, 0xba,
+ 0x4b, 0xea, 0xd1, 0x12, 0x4b, 0x8f, 0x3d, 0xc2, 0xfb, 0x5a, 0x83, 0x5b, 0xe5, 0xc8, 0x6b, 0xd1,
+ 0x92, 0x03, 0x31, 0x3b, 0xa4, 0xa9, 0x8d, 0x15, 0x46, 0x9d, 0x27, 0x50, 0x07, 0x9f, 0x3a, 0x93,
+ 0xb5, 0xb1, 0x73, 0x87, 0xd0, 0xfd, 0xdb, 0x65, 0xcf, 0x3e, 0xc3, 0xab, 0x9a, 0x83, 0x0f, 0x10,
+ 0x6f, 0xf8, 0xf5, 0x38, 0xa6, 0x3d, 0xd2, 0x42, 0xa6, 0x36, 0x8e, 0xfc, 0xc6, 0xdd, 0x06, 0xd8,
+ 0xc8, 0x19, 0xbf, 0xe3, 0x6a, 0xca, 0x5c, 0x84, 0x1a, 0x9e, 0x7b, 0xec, 0x14, 0xe4, 0x95, 0x9d,
+ 0x3b, 0x64, 0xa3, 0x90, 0x86, 0xc6, 0x4a, 0x6d, 0xd8, 0x93, 0x1b, 0x85, 0x63, 0x87, 0x40, 0x08,
+ 0xcd, 0x4a, 0x15, 0x78, 0xff, 0x8e, 0x0b, 0x21, 0xc8, 0x70, 0x39, 0xb4, 0xaf, 0x2c, 0x3c, 0x4f,
+ 0xa4, 0x58, 0x18, 0xd6, 0xc3, 0xbd, 0xc0, 0x01, 0x63, 0x43, 0xf7, 0x48, 0xd3, 0x57, 0xb2, 0x50,
+ 0x59, 0xce, 0x3e, 0x47, 0x47, 0x82, 0x81, 0xc7, 0x78, 0x63, 0x8d, 0x45, 0x3d, 0xc9, 0x72, 0xfa,
+ 0x67, 0xf2, 0xf1, 0xdd, 0x00, 0x8b, 0x14, 0x9a, 0x50, 0xbf, 0x57, 0xda, 0xeb, 0x0c, 0xdb, 0x2e,
+ 0x3f, 0xa2, 0x25, 0x82, 0x7c, 0xfb, 0x4e, 0xd0, 0x8f, 0xf3, 0x58, 0xc2, 0x43, 0xcb, 0x8b, 0xdc,
+ 0x58, 0x91, 0xa8, 0x54, 0x59, 0xf6, 0x14, 0xb3, 0xa5, 0xfe, 0xed, 0x37, 0xcf, 0xff, 0xf8, 0xe2,
+ 0xe5, 0x77, 0x9c, 0xe0, 0xde, 0x11, 0x6c, 0xd1, 0x3d, 0xd2, 0xc5, 0x44, 0x11, 0x26, 0x0a, 0x33,
+ 0x01, 0xdd, 0xcf, 0xb0, 0x2f, 0xd0, 0xec, 0x0e, 0xe2, 0xf3, 0x28, 0xcc, 0x66, 0x80, 0xd2, 0x4f,
+ 0x21, 0x6f, 0xac, 0xd4, 0x59, 0x98, 0xb0, 0x5d, 0xef, 0x98, 0x97, 0x31, 0xa7, 0xd2, 0xc2, 0x5e,
+ 0x8b, 0xcc, 0xb0, 0x2f, 0xe1, 0x31, 0x5e, 0x47, 0xf9, 0x04, 0x7c, 0xae, 0xbb, 0x51, 0x60, 0xd8,
+ 0x57, 0x3e, 0xbb, 0xef, 0x8e, 0x06, 0x5e, 0x03, 0xf9, 0xc4, 0xd0, 0xcf, 0x49, 0xcb, 0x67, 0x47,
+ 0xa1, 0xf3, 0xc2, 0xb0, 0xdf, 0x62, 0x85, 0xfa, 0x06, 0x3e, 0x03, 0x88, 0xee, 0x93, 0xed, 0xdb,
+ 0x2a, 0xae, 0x93, 0xec, 0xa3, 0xde, 0xbd, 0x5b, 0x7a, 0xd8, 0x51, 0x9e, 0x93, 0x87, 0x5e, 0x37,
+ 0x5e, 0xa7, 0x85, 0x88, 0xf2, 0xcc, 0xea, 0x3c, 0x49, 0xa4, 0x66, 0x5f, 0xa3, 0xf5, 0xf7, 0xdd,
+ 0xee, 0xe1, 0x3a, 0x2d, 0x0e, 0x6e, 0xf6, 0xa0, 0x2b, 0x2f, 0xb4, 0x94, 0xef, 0x37, 0x81, 0x67,
+ 0xcf, 0xf0, 0xf6, 0x96, 0x03, 0x5d, 0x8c, 0x61, 0x42, 0x5b, 0x95, 0x4a, 0x98, 0x95, 0xbf, 0x73,
+ 0xde, 0x7a, 0x91, 0x7e, 0x4d, 0x28, 0xf4, 0x63, 0xcc, 0x0e, 0x95, 0x89, 0x45, 0xa2, 0x96, 0x17,
+ 0x96, 0x0d, 0x30, 0x83, 0xa0, 0x53, 0xcf, 0x57, 0xaa, 0x98, 0x64, 0x63, 0x84, 0xc1, 0xe1, 0x9f,
+ 0x64, 0xb8, 0x12, 0xe6, 0xda, 0x44, 0x36, 0x31, 0xec, 0xf7, 0xa8, 0xd6, 0x04, 0x6c, 0xee, 0x20,
+ 0x6c, 0x1c, 0xe1, 0xfb, 0x6b, 0xec, 0x85, 0x86, 0x7d, 0xe3, 0x1b, 0x47, 0xf8, 0xfe, 0x7a, 0x06,
+ 0x00, 0x36, 0x6b, 0x1b, 0xda, 0xb5, 0x81, 0xba, 0xf8, 0x16, 0xbb, 0x4e, 0xe0, 0x80, 0x71, 0x0c,
+ 0xc1, 0xca, 0x75, 0x71, 0x01, 0xb4, 0x5a, 0xe3, 0xb3, 0x99, 0x0d, 0x9d, 0x29, 0x6e, 0x63, 0x66,
+ 0x8d, 0x4b, 0xe9, 0xfe, 0x33, 0xff, 0x47, 0xc0, 0x50, 0x69, 0x69, 0x0a, 0xa0, 0x5b, 0x4b, 0x63,
+ 0x73, 0x2d, 0x63, 0x9c, 0x97, 0x01, 0xbf, 0x91, 0xfb, 0xbb, 0x64, 0x1b, 0xb5, 0x3d, 0xe0, 0x0e,
+ 0xf8, 0x09, 0xe7, 0x66, 0x1f, 0x2c, 0xfb, 0x2f, 0x49, 0x13, 0xd5, 0x5c, 0x6b, 0xa6, 0x0f, 0x49,
+ 0xcd, 0xf5, 0x6c, 0x3f, 0x7f, 0xbd, 0xf4, 0xcb, 0xd1, 0xd8, 0xff, 0xc1, 0xfd, 0x95, 0xc4, 0x42,
+ 0x86, 0x76, 0xad, 0x9d, 0x9f, 0xa9, 0x4c, 0x05, 0xb6, 0xe3, 0x8d, 0x35, 0xa9, 0x4c, 0x4f, 0x41,
+ 0xfe, 0x59, 0x8c, 0xca, 0x3f, 0x8b, 0x51, 0xff, 0x9f, 0x25, 0x12, 0x78, 0x6b, 0xff, 0x46, 0xfb,
+ 0xa4, 0x62, 0xaf, 0x0b, 0x37, 0xcd, 0x3b, 0xc3, 0xce, 0x60, 0xb3, 0x21, 0x00, 0xe5, 0xb8, 0x47,
+ 0x9f, 0x90, 0x0a, 0x8c, 0x75, 0xbc, 0xa9, 0x39, 0x24, 0x83, 0x9b, 0x41, 0xcf, 0x11, 0xbf, 0x3d,
+ 0x82, 0xd6, 0x51, 0x04, 0xdf, 0xb4, 0xad, 0x3b, 0x23, 0xc8, 0x81, 0x60, 0xf3, 0x4a, 0xca, 0x42,
+ 0xe4, 0x85, 0xcc, 0xfc, 0xe0, 0x0e, 0x00, 0x98, 0x16, 0x32, 0xa3, 0xfb, 0x24, 0xd8, 0x38, 0x87,
+ 0x03, 0xbb, 0xb9, 0xb1, 0x65, 0x83, 0xf2, 0x9b, 0xfd, 0xfe, 0xbf, 0xcb, 0xfe, 0xb3, 0x81, 0x61,
+ 0xfe, 0x7f, 0x3c, 0x60, 0xa4, 0xbe, 0x31, 0x0d, 0xbe, 0x35, 0x01, 0xdf, 0x88, 0xf4, 0x29, 0xa9,
+ 0x00, 0xc5, 0x68, 0xf1, 0xcd, 0xa0, 0xb9, 0x21, 0x9d, 0xe3, 0x26, 0x7d, 0x46, 0xea, 0x9e, 0x59,
+ 0xb4, 0xbb, 0x39, 0xa4, 0x83, 0x5f, 0xd0, 0xcd, 0x37, 0x2a, 0xf4, 0x0b, 0x52, 0x73, 0x8e, 0x7b,
+ 0x47, 0x5a, 0x83, 0x5b, 0xa4, 0x73, 0xbf, 0xe7, 0xe7, 0x7b, 0xed, 0x7f, 0xce, 0xf7, 0x47, 0x40,
+ 0x96, 0x90, 0x5a, 0x67, 0x39, 0xfe, 0x3e, 0xaa, 0xbc, 0x1e, 0xe9, 0x11, 0x88, 0x77, 0x62, 0x16,
+ 0xfc, 0xf7, 0x98, 0x41, 0xf0, 0xdd, 0x35, 0xa9, 0x59, 0xe2, 0x4f, 0xa4, 0xc1, 0x03, 0xbc, 0x27,
+ 0x35, 0x4b, 0x18, 0x73, 0x97, 0x52, 0x1b, 0x95, 0x67, 0xf8, 0x0b, 0x69, 0x6e, 0x1a, 0xaa, 0x07,
+ 0xf9, 0x66, 0xb7, 0xff, 0xf7, 0x12, 0x69, 0xdd, 0xde, 0x81, 0xdf, 0x60, 0x1a, 0xbe, 0xcb, 0xb5,
+ 0xcf, 0x72, 0x27, 0x20, 0xaa, 0xb2, 0x5c, 0xfb, 0x8f, 0xa7, 0x13, 0x00, 0x5d, 0x2a, 0xeb, 0xbf,
+ 0xe6, 0x0d, 0xee, 0x04, 0x28, 0x2b, 0xb3, 0x3e, 0x77, 0x3f, 0xa4, 0x8a, 0x2f, 0x58, 0x2f, 0xc3,
+ 0x09, 0xfc, 0xe9, 0x62, 0x20, 0xab, 0xdc, 0x09, 0xf0, 0x95, 0x81, 0x5e, 0x89, 0xb1, 0x6b, 0x70,
+ 0x5c, 0xef, 0x0b, 0x6f, 0x97, 0x1f, 0x01, 0x94, 0x90, 0xda, 0xe4, 0xcd, 0xc9, 0x94, 0x8f, 0xba,
+ 0x1f, 0xd1, 0x26, 0xa9, 0x1f, 0xbc, 0x11, 0x27, 0xd3, 0x93, 0x51, 0xb7, 0x44, 0x1b, 0xa4, 0x3a,
+ 0xe3, 0xd3, 0xd9, 0xbc, 0x5b, 0xa6, 0x01, 0xa9, 0xcc, 0xa7, 0xe3, 0xd3, 0xee, 0x16, 0xac, 0xc6,
+ 0x67, 0x47, 0x47, 0xdd, 0x0a, 0x9c, 0x9b, 0x9f, 0xf2, 0xc9, 0xc1, 0x69, 0xb7, 0x0a, 0xe7, 0x0e,
+ 0x47, 0xe3, 0xd7, 0x67, 0x47, 0xa7, 0xdd, 0xda, 0xfe, 0x3f, 0x4a, 0xbe, 0x04, 0x37, 0x99, 0x05,
+ 0x37, 0x8d, 0x8e, 0x67, 0xa7, 0x3f, 0x76, 0x3f, 0x82, 0xf3, 0x87, 0x67, 0xc7, 0xb3, 0x6e, 0x09,
+ 0xce, 0xf0, 0xd1, 0xfc, 0x14, 0x1e, 0x2e, 0x83, 0xc6, 0xc1, 0x5f, 0x46, 0x07, 0x3f, 0x74, 0xb7,
+ 0x68, 0x8b, 0x04, 0x33, 0x3e, 0x12, 0xa8, 0x55, 0xa1, 0xf7, 0x48, 0x73, 0xf6, 0xfa, 0xcd, 0x48,
+ 0xcc, 0x47, 0xfc, 0xed, 0x88, 0x77, 0xab, 0xf0, 0xec, 0xc9, 0xf4, 0x74, 0x32, 0xfe, 0xb1, 0x5b,
+ 0xa3, 0x5d, 0xd2, 0x3a, 0x98, 0x9d, 0x4d, 0x4e, 0xc6, 0x53, 0xa7, 0x5e, 0xa7, 0xdb, 0xa4, 0xbd,
+ 0x41, 0xdc, 0x7d, 0x01, 0x40, 0xe3, 0xd1, 0xeb, 0xd3, 0x33, 0x3e, 0xf2, 0x50, 0x03, 0x9e, 0x7e,
+ 0x3b, 0xe2, 0xf3, 0xc9, 0xf4, 0xa4, 0x4b, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x2a, 0xaf,
+ 0x49, 0x5b, 0x0d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto b/vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto
new file mode 100644
index 000000000..48e42e26e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto
@@ -0,0 +1,209 @@
+syntax = "proto2";
+
+message criu_page_server_info {
+ optional string address = 1;
+ optional int32 port = 2;
+ optional int32 pid = 3;
+ optional int32 fd = 4;
+}
+
+message criu_veth_pair {
+ required string if_in = 1;
+ required string if_out = 2;
+};
+
+message ext_mount_map {
+ required string key = 1;
+ required string val = 2;
+};
+
+message join_namespace {
+ required string ns = 1;
+ required string ns_file = 2;
+ optional string extra_opt = 3;
+}
+
+message inherit_fd {
+ required string key = 1;
+ required int32 fd = 2;
+};
+
+message cgroup_root {
+ optional string ctrl = 1;
+ required string path = 2;
+};
+
+message unix_sk {
+ required uint32 inode = 1;
+};
+
+enum criu_cg_mode {
+ IGNORE = 0;
+ CG_NONE = 1;
+ PROPS = 2;
+ SOFT = 3;
+ FULL = 4;
+ STRICT = 5;
+ DEFAULT = 6;
+};
+
+message criu_opts {
+ required int32 images_dir_fd = 1;
+ optional int32 pid = 2; /* if not set on dump, will dump requesting process */
+
+ optional bool leave_running = 3;
+ optional bool ext_unix_sk = 4;
+ optional bool tcp_established = 5;
+ optional bool evasive_devices = 6;
+ optional bool shell_job = 7;
+ optional bool file_locks = 8;
+ optional int32 log_level = 9 [default = 2];
+ optional string log_file = 10; /* No subdirs are allowed. Consider using work-dir */
+
+ optional criu_page_server_info ps = 11;
+
+ optional bool notify_scripts = 12;
+
+ optional string root = 13;
+ optional string parent_img = 14;
+ optional bool track_mem = 15;
+ optional bool auto_dedup = 16;
+
+ optional int32 work_dir_fd = 17;
+ optional bool link_remap = 18;
+ repeated criu_veth_pair veths = 19; /* DEPRECATED, use external instead */
+
+ optional uint32 cpu_cap = 20 [default = 0xffffffff];
+ optional bool force_irmap = 21;
+ repeated string exec_cmd = 22;
+
+ repeated ext_mount_map ext_mnt = 23; /* DEPRECATED, use external instead */
+ optional bool manage_cgroups = 24; /* backward compatibility */
+ repeated cgroup_root cg_root = 25;
+
+ optional bool rst_sibling = 26; /* swrk only */
+ repeated inherit_fd inherit_fd = 27; /* swrk only */
+
+ optional bool auto_ext_mnt = 28;
+ optional bool ext_sharing = 29;
+ optional bool ext_masters = 30;
+
+ repeated string skip_mnt = 31;
+ repeated string enable_fs = 32;
+
+ repeated unix_sk unix_sk_ino = 33; /* DEPRECATED, use external instead */
+
+ optional criu_cg_mode manage_cgroups_mode = 34;
+ optional uint32 ghost_limit = 35 [default = 0x100000];
+ repeated string irmap_scan_paths = 36;
+ repeated string external = 37;
+ optional uint32 empty_ns = 38;
+ repeated join_namespace join_ns = 39;
+
+ optional string cgroup_props = 41;
+ optional string cgroup_props_file = 42;
+ repeated string cgroup_dump_controller = 43;
+
+ optional string freeze_cgroup = 44;
+ optional uint32 timeout = 45;
+ optional bool tcp_skip_in_flight = 46;
+ optional bool weak_sysctls = 47;
+ optional bool lazy_pages = 48;
+ optional int32 status_fd = 49;
+ optional bool orphan_pts_master = 50;
+}
+
+message criu_dump_resp {
+ optional bool restored = 1;
+}
+
+message criu_restore_resp {
+ required int32 pid = 1;
+}
+
+message criu_notify {
+ optional string script = 1;
+ optional int32 pid = 2;
+}
+
+enum criu_req_type {
+ EMPTY = 0;
+ DUMP = 1;
+ RESTORE = 2;
+ CHECK = 3;
+ PRE_DUMP = 4;
+ PAGE_SERVER = 5;
+
+ NOTIFY = 6;
+
+ CPUINFO_DUMP = 7;
+ CPUINFO_CHECK = 8;
+
+ FEATURE_CHECK = 9;
+
+ VERSION = 10;
+}
+
+/*
+ * List of features which can be queried via
+ * CRIU_REQ_TYPE__FEATURE_CHECK
+ */
+message criu_features {
+ optional bool mem_track = 1;
+ optional bool lazy_pages = 2;
+}
+
+/*
+ * Request -- each type corresponds to the must-be-there
+ * request arguments of the respective type
+ */
+
+message criu_req {
+ required criu_req_type type = 1;
+
+ optional criu_opts opts = 2;
+ optional bool notify_success = 3;
+
+ /*
+	 * When set, the service won't close the connection but
+	 * will wait for more req-s to appear. This does not
+	 * work for all request types.
+ */
+ optional bool keep_open = 4;
+ /*
+ * 'features' can be used to query which features
+ * are supported by the installed criu/kernel
+ * via RPC.
+ */
+ optional criu_features features = 5;
+}
+
+/*
+ * Response -- it states whether the request was served and
+ * carries additional request-specific information
+ */
+
+message criu_resp {
+ required criu_req_type type = 1;
+ required bool success = 2;
+
+ optional criu_dump_resp dump = 3;
+ optional criu_restore_resp restore = 4;
+ optional criu_notify notify = 5;
+ optional criu_page_server_info ps = 6;
+
+ optional int32 cr_errno = 7;
+ optional criu_features features = 8;
+ optional string cr_errmsg = 9;
+ optional criu_version version = 10;
+}
+
+/* Answer for criu_req_type.VERSION requests */
+message criu_version {
+ required int32 major = 1;
+ required int32 minor = 2;
+ optional string gitid = 3;
+ optional int32 sublevel = 4;
+ optional int32 extra = 5;
+ optional string name = 6;
+}
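As a rough illustration of how these messages are driven in practice, here is a minimal sketch of building a FEATURE_CHECK request with the generated Go bindings from the criurpc package above. The names (CriuReq, CriuReqType_FEATURE_CHECK, CriuFeatures, proto.Bool) are assumed to follow the standard proto2 Go generator conventions:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/opencontainers/runc/libcontainer/criurpc"
)

func main() {
	// Ask the CRIU service which optional features it supports
	// (a FEATURE_CHECK request, per the comments in the .proto above).
	t := criurpc.CriuReqType_FEATURE_CHECK
	req := &criurpc.CriuReq{
		Type: &t,
		Features: &criurpc.CriuFeatures{
			MemTrack:  proto.Bool(true),
			LazyPages: proto.Bool(true),
		},
	}
	data, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	// A real caller would write this payload to CRIU's RPC socket and
	// decode a criu_resp; here we only show that the message serializes.
	fmt.Printf("criu_req serialized to %d bytes\n", len(data))
}
```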
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go
new file mode 100644
index 000000000..461dc097c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go
@@ -0,0 +1,100 @@
+package devices
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ ErrNotADevice = errors.New("not a device node")
+)
+
+// Testing dependencies
+var (
+ unixLstat = unix.Lstat
+ ioutilReadDir = ioutil.ReadDir
+)
+
+// DeviceFromPath takes the path to a device and its cgroup_permissions (which cannot be easily queried), looks up the information about the Linux device, and returns it as a Device struct.
+func DeviceFromPath(path, permissions string) (*configs.Device, error) {
+ var stat unix.Stat_t
+ err := unixLstat(path, &stat)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ devType rune
+ mode = stat.Mode
+ )
+ switch {
+ case mode&unix.S_IFBLK == unix.S_IFBLK:
+ devType = 'b'
+ case mode&unix.S_IFCHR == unix.S_IFCHR:
+ devType = 'c'
+ default:
+ return nil, ErrNotADevice
+ }
+ devNumber := int(stat.Rdev)
+ uid := stat.Uid
+ gid := stat.Gid
+ return &configs.Device{
+ Type: devType,
+ Path: path,
+ Major: Major(devNumber),
+ Minor: Minor(devNumber),
+ Permissions: permissions,
+ FileMode: os.FileMode(mode),
+ Uid: uid,
+ Gid: gid,
+ }, nil
+}
+
+func HostDevices() ([]*configs.Device, error) {
+ return getDevices("/dev")
+}
+
+func getDevices(path string) ([]*configs.Device, error) {
+ files, err := ioutilReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ out := []*configs.Device{}
+ for _, f := range files {
+ switch {
+ case f.IsDir():
+ switch f.Name() {
+ // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825
+ case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
+ continue
+ default:
+ sub, err := getDevices(filepath.Join(path, f.Name()))
+ if err != nil {
+ return nil, err
+ }
+
+ out = append(out, sub...)
+ continue
+ }
+ case f.Name() == "console":
+ continue
+ }
+ device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
+ if err != nil {
+ if err == ErrNotADevice {
+ continue
+ }
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ out = append(out, device)
+ }
+ return out, nil
+}
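A short usage sketch for this package, assuming only the exported HostDevices helper and the configs.Device fields used above:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/devices"
)

func main() {
	// Walk /dev (skipping pts, shm, fd, mqueue, ...) and print every
	// device node with its type and major:minor numbers.
	devs, err := devices.HostDevices()
	if err != nil {
		panic(err)
	}
	for _, d := range devs {
		fmt.Printf("%c %s %d:%d\n", d.Type, d.Path, d.Major, d.Minor)
	}
}
```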
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
new file mode 100644
index 000000000..6649b9f2d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package devices
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
new file mode 100644
index 000000000..885b6e5dd
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
@@ -0,0 +1,24 @@
+// +build linux freebsd
+
+package devices
+
+/*
+
+This code provides support for manipulating Linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
+
+You can read what they are here:
+
+ - http://www.makelinux.net/ldd3/chp-3-sect-2
+ - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
+
+Note! These are NOT the same as the MAJOR(dev_t device), MINOR(dev_t device) and MKDEV(int major, int minor) macros defined in <linux/kdev_t.h>, because the representation of device numbers used by Go is different from the one used internally by the kernel: https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
+
+*/
+
+func Major(devNumber int) int64 {
+ return int64((devNumber >> 8) & 0xfff)
+}
+
+func Minor(devNumber int) int64 {
+ return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
+}
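A worked example of these conversions, assuming /dev/null's conventional 1:3 character-device number on Linux:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/devices"
	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/dev/null", &st); err != nil {
		panic(err)
	}
	dev := int(st.Rdev)
	// /dev/null is conventionally char device 1:3 on Linux, so this
	// should print "major=1 minor=3".
	fmt.Printf("major=%d minor=%d\n", devices.Major(dev), devices.Minor(dev))
}
```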
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/error.go b/vendor/github.com/opencontainers/runc/libcontainer/error.go
new file mode 100644
index 000000000..21a3789ba
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/error.go
@@ -0,0 +1,70 @@
+package libcontainer
+
+import "io"
+
+// ErrorCode is the API error code type.
+type ErrorCode int
+
+// API error codes.
+const (
+ // Factory errors
+ IdInUse ErrorCode = iota
+ InvalidIdFormat
+
+ // Container errors
+ ContainerNotExists
+ ContainerPaused
+ ContainerNotStopped
+ ContainerNotRunning
+ ContainerNotPaused
+
+ // Process errors
+ NoProcessOps
+
+ // Common errors
+ ConfigInvalid
+ ConsoleExists
+ SystemError
+)
+
+func (c ErrorCode) String() string {
+ switch c {
+ case IdInUse:
+ return "Id already in use"
+ case InvalidIdFormat:
+ return "Invalid format"
+ case ContainerPaused:
+ return "Container paused"
+ case ConfigInvalid:
+ return "Invalid configuration"
+ case SystemError:
+ return "System error"
+ case ContainerNotExists:
+ return "Container does not exist"
+ case ContainerNotStopped:
+ return "Container is not stopped"
+ case ContainerNotRunning:
+ return "Container is not running"
+ case ConsoleExists:
+ return "Console exists for process"
+ case ContainerNotPaused:
+ return "Container is not paused"
+ case NoProcessOps:
+ return "No process operations"
+ default:
+ return "Unknown error"
+ }
+}
+
+// Error is the API error type.
+type Error interface {
+ error
+
+ // Returns an error if it failed to write the detail of the Error to w.
+ // The detail of the Error may include the error message and a
+ // representation of the stack trace.
+ Detail(w io.Writer) error
+
+ // Returns the error code for this error.
+ Code() ErrorCode
+}
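A sketch of how a caller might branch on these codes; the loadOrNil helper is hypothetical:

```go
package main

import (
	"os"

	"github.com/opencontainers/runc/libcontainer"
)

// loadOrNil is a hypothetical helper showing how callers can inspect
// the typed Error and its Code instead of string-matching messages.
func loadOrNil(factory libcontainer.Factory, id string) (libcontainer.Container, error) {
	container, err := factory.Load(id)
	if err != nil {
		if lerr, ok := err.(libcontainer.Error); ok {
			switch lerr.Code() {
			case libcontainer.ContainerNotExists:
				// Nothing to load; treat as "already gone".
				return nil, nil
			case libcontainer.SystemError:
				// Dump the full detail, including the stack trace.
				lerr.Detail(os.Stderr)
			}
		}
		return nil, err
	}
	return container, nil
}

func main() {}
```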
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory.go b/vendor/github.com/opencontainers/runc/libcontainer/factory.go
new file mode 100644
index 000000000..0986cd77e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/factory.go
@@ -0,0 +1,44 @@
+package libcontainer
+
+import (
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Factory interface {
+ // Creates a new container with the given id and starts the initial process inside it.
+ // id must be a string containing only letters, digits and underscores and must contain
+ // between 1 and 1024 characters, inclusive.
+ //
+ // The id must not already be in use by an existing container. Containers created using
+ // a factory with the same path (and filesystem) must have distinct ids.
+ //
+ // Returns the new container with a running process.
+ //
+ // errors:
+ // IdInUse - id is already in use by a container
+ // InvalidIdFormat - id has incorrect format
+ // ConfigInvalid - config is invalid
+	//   SystemError - System error
+ //
+ // On error, any partially created container parts are cleaned up (the operation is atomic).
+ Create(id string, config *configs.Config) (Container, error)
+
+ // Load takes an ID for an existing container and returns the container information
+ // from the state. This presents a read only view of the container.
+ //
+ // errors:
+ // Path does not exist
+ // System error
+ Load(id string) (Container, error)
+
+ // StartInitialization is an internal API to libcontainer used during the reexec of the
+ // container.
+ //
+ // Errors:
+ // Pipe connection error
+ // System error
+ StartInitialization() error
+
+	// Type returns an info string about the factory type (e.g. lxc, libcontainer...)
+ Type() string
+}
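A hypothetical usage sketch of this interface; the root directory, container id, and config values are illustrative, and New/Cgroupfs come from factory_linux.go below:

```go
package main

import (
	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// Cgroupfs selects the native cgroup filesystem manager.
	factory, err := libcontainer.New("/run/example-containers", libcontainer.Cgroupfs)
	if err != nil {
		panic(err)
	}

	// A real config needs many more fields to pass validation; this is
	// only a placeholder to show the Create call shape.
	config := &configs.Config{
		Rootfs: "/var/lib/example/rootfs",
	}
	container, err := factory.Create("example_1", config)
	if err != nil {
		panic(err) // e.g. IdInUse, InvalidIdFormat, ConfigInvalid
	}
	defer container.Destroy()
}
```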
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go
new file mode 100644
index 000000000..42b6f5a05
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go
@@ -0,0 +1,325 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime/debug"
+ "strconv"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/cgroups/rootless"
+ "github.com/opencontainers/runc/libcontainer/cgroups/systemd"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/configs/validate"
+ "github.com/opencontainers/runc/libcontainer/utils"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ stateFilename = "state.json"
+ execFifoFilename = "exec.fifo"
+)
+
+var idRegex = regexp.MustCompile(`^[\w+-\.]+$`)
+
+// InitArgs returns an options func to configure a LinuxFactory with the
+// provided init binary path and arguments.
+func InitArgs(args ...string) func(*LinuxFactory) error {
+ return func(l *LinuxFactory) (err error) {
+ if len(args) > 0 {
+		// Resolve relative paths to ensure that it's available
+ // after directory changes.
+ if args[0], err = filepath.Abs(args[0]); err != nil {
+ return newGenericError(err, ConfigInvalid)
+ }
+ }
+
+ l.InitArgs = args
+ return nil
+ }
+}
+
+// SystemdCgroups is an options func to configure a LinuxFactory to return
+// containers that use systemd to create and manage cgroups.
+func SystemdCgroups(l *LinuxFactory) error {
+ l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
+ return &systemd.Manager{
+ Cgroups: config,
+ Paths: paths,
+ }
+ }
+ return nil
+}
+
+// Cgroupfs is an options func to configure a LinuxFactory to return
+// containers that use the native cgroups filesystem implementation to
+// create and manage cgroups.
+func Cgroupfs(l *LinuxFactory) error {
+ l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
+ return &fs.Manager{
+ Cgroups: config,
+ Paths: paths,
+ }
+ }
+ return nil
+}
+
+// RootlessCgroups is an options func to configure a LinuxFactory to
+// return containers that use the "rootless" cgroup manager, which will
+// fail to do any operations not possible to do with an unprivileged user.
+// It should only be used in conjunction with rootless containers.
+func RootlessCgroups(l *LinuxFactory) error {
+ l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
+ return &rootless.Manager{
+ Cgroups: config,
+ Paths: paths,
+ }
+ }
+ return nil
+}
+
+// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.
+func TmpfsRoot(l *LinuxFactory) error {
+ mounted, err := mount.Mounted(l.Root)
+ if err != nil {
+ return err
+ }
+ if !mounted {
+ if err := unix.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CriuPath returns an option func to configure a LinuxFactory with the
+// provided criupath
+func CriuPath(criupath string) func(*LinuxFactory) error {
+ return func(l *LinuxFactory) error {
+ l.CriuPath = criupath
+ return nil
+ }
+}
+
+// New returns a Linux-based container factory rooted at the given directory and
+// configures the factory with the provided option funcs.
+func New(root string, options ...func(*LinuxFactory) error) (Factory, error) {
+ if root != "" {
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, newGenericError(err, SystemError)
+ }
+ }
+ l := &LinuxFactory{
+ Root: root,
+ InitArgs: []string{"/proc/self/exe", "init"},
+ Validator: validate.New(),
+ CriuPath: "criu",
+ }
+ Cgroupfs(l)
+ for _, opt := range options {
+ if err := opt(l); err != nil {
+ return nil, err
+ }
+ }
+ return l, nil
+}
+
+// LinuxFactory implements the default factory interface for linux based systems.
+type LinuxFactory struct {
+ // Root directory for the factory to store state.
+ Root string
+
+ // InitArgs are arguments for calling the init responsibilities for spawning
+ // a container.
+ InitArgs []string
+
+ // CriuPath is the path to the criu binary used for checkpoint and restore of
+ // containers.
+ CriuPath string
+
+ // Validator provides validation to container configurations.
+ Validator validate.Validator
+
+ // NewCgroupsManager returns an initialized cgroups manager for a single container.
+ NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager
+}
+
+func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {
+ if l.Root == "" {
+ return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
+ }
+ if err := l.validateID(id); err != nil {
+ return nil, err
+ }
+ if err := l.Validator.Validate(config); err != nil {
+ return nil, newGenericError(err, ConfigInvalid)
+ }
+ containerRoot := filepath.Join(l.Root, id)
+ if _, err := os.Stat(containerRoot); err == nil {
+ return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse)
+ } else if !os.IsNotExist(err) {
+ return nil, newGenericError(err, SystemError)
+ }
+ if err := os.MkdirAll(containerRoot, 0711); err != nil {
+ return nil, newGenericError(err, SystemError)
+ }
+ if err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil {
+ return nil, newGenericError(err, SystemError)
+ }
+ if config.Rootless {
+ RootlessCgroups(l)
+ }
+ c := &linuxContainer{
+ id: id,
+ root: containerRoot,
+ config: config,
+ initArgs: l.InitArgs,
+ criuPath: l.CriuPath,
+ cgroupManager: l.NewCgroupsManager(config.Cgroups, nil),
+ }
+ c.state = &stoppedState{c: c}
+ return c, nil
+}
+
+func (l *LinuxFactory) Load(id string) (Container, error) {
+ if l.Root == "" {
+ return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
+ }
+ containerRoot := filepath.Join(l.Root, id)
+ state, err := l.loadState(containerRoot, id)
+ if err != nil {
+ return nil, err
+ }
+ r := &nonChildProcess{
+ processPid: state.InitProcessPid,
+ processStartTime: state.InitProcessStartTime,
+ fds: state.ExternalDescriptors,
+ }
+ // We have to use the RootlessManager.
+ if state.Rootless {
+ RootlessCgroups(l)
+ }
+ c := &linuxContainer{
+ initProcess: r,
+ initProcessStartTime: state.InitProcessStartTime,
+ id: id,
+ config: &state.Config,
+ initArgs: l.InitArgs,
+ criuPath: l.CriuPath,
+ cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),
+ root: containerRoot,
+ created: state.Created,
+ }
+ c.state = &loadedState{c: c}
+ if err := c.refreshState(); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+func (l *LinuxFactory) Type() string {
+ return "libcontainer"
+}
+
+// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state.
+// This is a low-level implementation detail of the reexec and should not be consumed externally.
+func (l *LinuxFactory) StartInitialization() (err error) {
+ var (
+ pipefd, rootfd int
+ consoleSocket *os.File
+ envInitPipe = os.Getenv("_LIBCONTAINER_INITPIPE")
+ envStateDir = os.Getenv("_LIBCONTAINER_STATEDIR")
+ envConsole = os.Getenv("_LIBCONTAINER_CONSOLE")
+ )
+
+ // Get the INITPIPE.
+ pipefd, err = strconv.Atoi(envInitPipe)
+ if err != nil {
+ return fmt.Errorf("unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s", envInitPipe, err)
+ }
+
+ var (
+ pipe = os.NewFile(uintptr(pipefd), "pipe")
+ it = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
+ )
+ defer pipe.Close()
+
+ // Only init processes have STATEDIR.
+ rootfd = -1
+ if it == initStandard {
+ if rootfd, err = strconv.Atoi(envStateDir); err != nil {
+ return fmt.Errorf("unable to convert _LIBCONTAINER_STATEDIR=%s to int: %s", envStateDir, err)
+ }
+ }
+
+ if envConsole != "" {
+ console, err := strconv.Atoi(envConsole)
+ if err != nil {
+ return fmt.Errorf("unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s", envConsole, err)
+ }
+ consoleSocket = os.NewFile(uintptr(console), "console-socket")
+ defer consoleSocket.Close()
+ }
+
+ // clear the current process's environment to clean any libcontainer
+ // specific env vars.
+ os.Clearenv()
+
+ defer func() {
+		// If we have an error during the initialization of the container's init,
+		// send it back to the parent process in the form of an initError.
+ if werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ if werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ }()
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack()))
+ }
+ }()
+
+ i, err := newContainerInit(it, pipe, consoleSocket, rootfd)
+ if err != nil {
+ return err
+ }
+
+ // If Init succeeds, syscall.Exec will not return, hence none of the defers will be called.
+ return i.Init()
+}
+
+func (l *LinuxFactory) loadState(root, id string) (*State, error) {
+ f, err := os.Open(filepath.Join(root, stateFilename))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, newGenericError(fmt.Errorf("container %q does not exist", id), ContainerNotExists)
+ }
+ return nil, newGenericError(err, SystemError)
+ }
+ defer f.Close()
+ var state *State
+ if err := json.NewDecoder(f).Decode(&state); err != nil {
+ return nil, newGenericError(err, SystemError)
+ }
+ return state, nil
+}
+
+func (l *LinuxFactory) validateID(id string) error {
+ if !idRegex.MatchString(id) {
+ return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go
new file mode 100644
index 000000000..6e7de2fe7
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go
@@ -0,0 +1,92 @@
+package libcontainer
+
+import (
+ "fmt"
+ "io"
+ "text/template"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/stacktrace"
+)
+
+var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}}
+Code: {{.ECode}}
+{{if .Message }}
+Message: {{.Message}}
+{{end}}
+Frames:{{range $i, $frame := .Stack.Frames}}
+---
+{{$i}}: {{$frame.Function}}
+Package: {{$frame.Package}}
+File: {{$frame.File}}@{{$frame.Line}}{{end}}
+`))
+
+func newGenericError(err error, c ErrorCode) Error {
+ if le, ok := err.(Error); ok {
+ return le
+ }
+ gerr := &genericError{
+ Timestamp: time.Now(),
+ Err: err,
+ ECode: c,
+ Stack: stacktrace.Capture(1),
+ }
+ if err != nil {
+ gerr.Message = err.Error()
+ }
+ return gerr
+}
+
+func newSystemError(err error) Error {
+ return createSystemError(err, "")
+}
+
+func newSystemErrorWithCausef(err error, cause string, v ...interface{}) Error {
+ return createSystemError(err, fmt.Sprintf(cause, v...))
+}
+
+func newSystemErrorWithCause(err error, cause string) Error {
+ return createSystemError(err, cause)
+}
+
+// createSystemError creates the specified error with the correct number of
+// stack frames skipped. This is only to be called by the other functions for
+// formatting the error.
+func createSystemError(err error, cause string) Error {
+ gerr := &genericError{
+ Timestamp: time.Now(),
+ Err: err,
+ ECode: SystemError,
+ Cause: cause,
+ Stack: stacktrace.Capture(2),
+ }
+ if err != nil {
+ gerr.Message = err.Error()
+ }
+ return gerr
+}
+
+type genericError struct {
+ Timestamp time.Time
+ ECode ErrorCode
+ Err error `json:"-"`
+ Cause string
+ Message string
+ Stack stacktrace.Stacktrace
+}
+
+func (e *genericError) Error() string {
+ if e.Cause == "" {
+ return e.Message
+ }
+ frame := e.Stack.Frames[0]
+ return fmt.Sprintf("%s:%d: %s caused %q", frame.File, frame.Line, e.Cause, e.Message)
+}
+
+func (e *genericError) Code() ErrorCode {
+ return e.ECode
+}
+
+func (e *genericError) Detail(w io.Writer) error {
+ return errorTemplate.Execute(w, e)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go
new file mode 100644
index 000000000..63afd28eb
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go
@@ -0,0 +1,502 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strings"
+ "syscall" // only for Errno
+ "unsafe"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/opencontainers/runc/libcontainer/utils"
+
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netlink"
+ "golang.org/x/sys/unix"
+)
+
+type initType string
+
+const (
+ initSetns initType = "setns"
+ initStandard initType = "standard"
+)
+
+type pid struct {
+ Pid int `json:"pid"`
+}
+
+// network is an internal struct used to setup container networks.
+type network struct {
+ configs.Network
+
+ // TempVethPeerName is a unique temporary veth peer name that was placed into
+ // the container's namespace.
+ TempVethPeerName string `json:"temp_veth_peer_name"`
+}
+
+// initConfig is used for transferring parameters from Exec() to Init()
+type initConfig struct {
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Cwd string `json:"cwd"`
+ Capabilities *configs.Capabilities `json:"capabilities"`
+ ProcessLabel string `json:"process_label"`
+ AppArmorProfile string `json:"apparmor_profile"`
+ NoNewPrivileges bool `json:"no_new_privileges"`
+ User string `json:"user"`
+ AdditionalGroups []string `json:"additional_groups"`
+ Config *configs.Config `json:"config"`
+ Networks []*network `json:"network"`
+ PassedFilesCount int `json:"passed_files_count"`
+ ContainerId string `json:"containerid"`
+ Rlimits []configs.Rlimit `json:"rlimits"`
+ CreateConsole bool `json:"create_console"`
+ Rootless bool `json:"rootless"`
+}
+
+type initer interface {
+ Init() error
+}
+
+func newContainerInit(t initType, pipe *os.File, consoleSocket *os.File, stateDirFD int) (initer, error) {
+ var config *initConfig
+ if err := json.NewDecoder(pipe).Decode(&config); err != nil {
+ return nil, err
+ }
+ if err := populateProcessEnvironment(config.Env); err != nil {
+ return nil, err
+ }
+ switch t {
+ case initSetns:
+ return &linuxSetnsInit{
+ pipe: pipe,
+ consoleSocket: consoleSocket,
+ config: config,
+ }, nil
+ case initStandard:
+ return &linuxStandardInit{
+ pipe: pipe,
+ consoleSocket: consoleSocket,
+ parentPid: unix.Getppid(),
+ config: config,
+ stateDirFD: stateDirFD,
+ }, nil
+ }
+ return nil, fmt.Errorf("unknown init type %q", t)
+}
+
+// populateProcessEnvironment loads the provided environment variables into the
+// current process's environment.
+func populateProcessEnvironment(env []string) error {
+ for _, pair := range env {
+ p := strings.SplitN(pair, "=", 2)
+ if len(p) < 2 {
+ return fmt.Errorf("invalid environment '%v'", pair)
+ }
+ if err := os.Setenv(p[0], p[1]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// finalizeNamespace drops the caps, sets the correct user
+// and working dir, and closes any leaked file descriptors
+// before executing the command inside the namespace
+func finalizeNamespace(config *initConfig) error {
+ // Ensure that all unwanted fds we may have accidentally
+ // inherited are marked close-on-exec so they stay out of the
+ // container
+ if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
+ return err
+ }
+
+ capabilities := &configs.Capabilities{}
+ if config.Capabilities != nil {
+ capabilities = config.Capabilities
+ } else if config.Config.Capabilities != nil {
+ capabilities = config.Config.Capabilities
+ }
+ w, err := newContainerCapList(capabilities)
+ if err != nil {
+ return err
+ }
+ // drop capabilities in bounding set before changing user
+ if err := w.ApplyBoundingSet(); err != nil {
+ return err
+ }
+ // preserve existing capabilities while we change users
+ if err := system.SetKeepCaps(); err != nil {
+ return err
+ }
+ if err := setupUser(config); err != nil {
+ return err
+ }
+ if err := system.ClearKeepCaps(); err != nil {
+ return err
+ }
+ if err := w.ApplyCaps(); err != nil {
+ return err
+ }
+ if config.Cwd != "" {
+ if err := unix.Chdir(config.Cwd); err != nil {
+ return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err)
+ }
+ }
+ return nil
+}
+
+// setupConsole sets up the console from inside the container, and sends the
+// master pty fd to the config.Pipe (using cmsg). This is done to ensure that
+// consoles are scoped to a container properly (see runc#814 and the many
+// issues related to that). This has to be run *after* we've pivoted to the new
+// rootfs (and the users' configuration is entirely set up).
+func setupConsole(socket *os.File, config *initConfig, mount bool) error {
+ defer socket.Close()
+ // At this point, /dev/ptmx points to something that we would expect. We
+	// used to change the owner of the slave path, but the /dev/pts mount can
+	// have gid=X set (at the user's option), so touching the owner of the
+	// slave PTY is not necessary, as the kernel will handle that for us. Note
+ // however, that setupUser (specifically fixStdioPermissions) *will* change
+ // the UID owner of the console to be the user the process will run as (so
+ // they can actually control their console).
+ console, err := newConsole()
+ if err != nil {
+ return err
+ }
+ // After we return from here, we don't need the console anymore.
+ defer console.Close()
+
+ linuxConsole, ok := console.(*linuxConsole)
+ if !ok {
+ return fmt.Errorf("failed to cast console to *linuxConsole")
+ }
+ // Mount the console inside our rootfs.
+ if mount {
+ if err := linuxConsole.mount(); err != nil {
+ return err
+ }
+ }
+ // While we can access console.master, using the API is a good idea.
+ if err := utils.SendFd(socket, linuxConsole.File()); err != nil {
+ return err
+ }
+ // Now, dup over all the things.
+ return linuxConsole.dupStdio()
+}
+
+// syncParentReady sends to the given pipe a JSON payload which indicates that
+// the init is ready to Exec the child process. It then waits for the parent to
+// indicate that it is cleared to Exec.
+func syncParentReady(pipe io.ReadWriter) error {
+ // Tell parent.
+ if err := writeSync(pipe, procReady); err != nil {
+ return err
+ }
+
+ // Wait for parent to give the all-clear.
+ if err := readSync(pipe, procRun); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// syncParentHooks sends to the given pipe a JSON payload which indicates that
+// the parent should execute pre-start hooks. It then waits for the parent to
+// indicate that it is cleared to resume.
+func syncParentHooks(pipe io.ReadWriter) error {
+ // Tell parent.
+ if err := writeSync(pipe, procHooks); err != nil {
+ return err
+ }
+
+ // Wait for parent to give the all-clear.
+ if err := readSync(pipe, procResume); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// setupUser changes the groups, gid, and uid for the user inside the container
+func setupUser(config *initConfig) error {
+ // Set up defaults.
+ defaultExecUser := user.ExecUser{
+ Uid: 0,
+ Gid: 0,
+ Home: "/",
+ }
+
+ passwdPath, err := user.GetPasswdPath()
+ if err != nil {
+ return err
+ }
+
+ groupPath, err := user.GetGroupPath()
+ if err != nil {
+ return err
+ }
+
+ execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath)
+ if err != nil {
+ return err
+ }
+
+ var addGroups []int
+ if len(config.AdditionalGroups) > 0 {
+ addGroups, err = user.GetAdditionalGroupsPath(config.AdditionalGroups, groupPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ if config.Rootless {
+ if execUser.Uid != 0 {
+ return fmt.Errorf("cannot run as a non-root user in a rootless container")
+ }
+
+ if execUser.Gid != 0 {
+ return fmt.Errorf("cannot run as a non-root group in a rootless container")
+ }
+
+ // We cannot set any additional groups in a rootless container and thus we
+ // bail if the user asked us to do so. TODO: We currently can't do this
+ // earlier, but if libcontainer.Process.User was typesafe this might work.
+ if len(addGroups) > 0 {
+ return fmt.Errorf("cannot set any additional groups in a rootless container")
+ }
+ }
+
+	// before we change to the container's user, make sure that the process's STDIO
+ // is correctly owned by the user that we are switching to.
+ if err := fixStdioPermissions(config, execUser); err != nil {
+ return err
+ }
+
+ // This isn't allowed in an unprivileged user namespace since Linux 3.19.
+ // There's nothing we can do about /etc/group entries, so we silently
+ // ignore setting groups here (since the user didn't explicitly ask us to
+ // set the group).
+ if !config.Rootless {
+ suppGroups := append(execUser.Sgids, addGroups...)
+ if err := unix.Setgroups(suppGroups); err != nil {
+ return err
+ }
+ }
+
+ if err := system.Setgid(execUser.Gid); err != nil {
+ return err
+ }
+
+ if err := system.Setuid(execUser.Uid); err != nil {
+ return err
+ }
+
+ // if we didn't get HOME already, set it based on the user's HOME
+ if envHome := os.Getenv("HOME"); envHome == "" {
+ if err := os.Setenv("HOME", execUser.Home); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user.
+// The ownership needs to match because it is created outside of the container and needs to be
+// localized.
+func fixStdioPermissions(config *initConfig, u *user.ExecUser) error {
+ var null unix.Stat_t
+ if err := unix.Stat("/dev/null", &null); err != nil {
+ return err
+ }
+ for _, fd := range []uintptr{
+ os.Stdin.Fd(),
+ os.Stderr.Fd(),
+ os.Stdout.Fd(),
+ } {
+ var s unix.Stat_t
+ if err := unix.Fstat(int(fd), &s); err != nil {
+ return err
+ }
+
+ // Skip chown of /dev/null if it was used as one of the STDIO fds.
+ if s.Rdev == null.Rdev {
+ continue
+ }
+
+ // Skip chown if s.Gid is actually an unmapped gid in the host. While
+ // this is a bit dodgy if it just so happens that the console _is_
+ // owned by overflow_gid, there's no way for us to disambiguate this as
+ // a userspace program.
+ if _, err := config.Config.HostGID(int(s.Gid)); err != nil {
+ continue
+ }
+
+ // We only change the uid owner (as it is possible for the mount to
+ // prefer a different gid, and there's no reason for us to change it).
+ // The reason why we don't just leave the default uid=X mount setup is
+ // that users expect to be able to actually use their console. Without
+ // this code, you couldn't effectively run as a non-root user inside a
+ // container and also have a console set up.
+ if err := unix.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setupNetwork sets up and initializes any network interface inside the container.
+func setupNetwork(config *initConfig) error {
+ for _, config := range config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+ if err := strategy.initialize(config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setupRoute(config *configs.Config) error {
+ for _, config := range config.Routes {
+ _, dst, err := net.ParseCIDR(config.Destination)
+ if err != nil {
+ return err
+ }
+ src := net.ParseIP(config.Source)
+ if src == nil {
+ return fmt.Errorf("Invalid source for route: %s", config.Source)
+ }
+ gw := net.ParseIP(config.Gateway)
+ if gw == nil {
+ return fmt.Errorf("Invalid gateway for route: %s", config.Gateway)
+ }
+ l, err := netlink.LinkByName(config.InterfaceName)
+ if err != nil {
+ return err
+ }
+ route := &netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ Dst: dst,
+ Src: src,
+ Gw: gw,
+ LinkIndex: l.Attrs().Index,
+ }
+ if err := netlink.RouteAdd(route); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setupRlimits(limits []configs.Rlimit, pid int) error {
+ for _, rlimit := range limits {
+ if err := system.Prlimit(pid, rlimit.Type, unix.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}); err != nil {
+ return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err)
+ }
+ }
+ return nil
+}
+
+const _P_PID = 1
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ // below here is a union; si_pid is the only field we use
+ si_pid int32
+ // Pad to 128 bytes as detailed in blockUntilWaitable
+ pad [96]byte
+}
+
+// isWaitable returns true if the process has exited, false otherwise.
+// It is based on blockUntilWaitable in src/os/wait_waitid.go.
+func isWaitable(pid int) (bool, error) {
+ si := &siginfo{}
+ _, _, e := unix.Syscall6(unix.SYS_WAITID, _P_PID, uintptr(pid), uintptr(unsafe.Pointer(si)), unix.WEXITED|unix.WNOWAIT|unix.WNOHANG, 0, 0)
+ if e != 0 {
+ return false, os.NewSyscallError("waitid", e)
+ }
+
+ return si.si_pid != 0, nil
+}
+
+// isNoChildren returns true if err represents a unix.ECHILD (formerly syscall.ECHILD), false otherwise.
+func isNoChildren(err error) bool {
+ switch err := err.(type) {
+ case syscall.Errno:
+ if err == unix.ECHILD {
+ return true
+ }
+ case *os.SyscallError:
+ if err.Err == unix.ECHILD {
+ return true
+ }
+ }
+ return false
+}
+
+// signalAllProcesses freezes then iterates over all the processes inside the
+// manager's cgroups sending the signal s to them.
+// If s is SIGKILL then it will wait for each process to exit.
+// For all other signals it will check if the process is ready to report its
+// exit status and only if it is will a wait be performed.
+func signalAllProcesses(m cgroups.Manager, s os.Signal) error {
+ var procs []*os.Process
+ if err := m.Freeze(configs.Frozen); err != nil {
+ logrus.Warn(err)
+ }
+ pids, err := m.GetAllPids()
+ if err != nil {
+ m.Freeze(configs.Thawed)
+ return err
+ }
+ for _, pid := range pids {
+ p, err := os.FindProcess(pid)
+ if err != nil {
+ logrus.Warn(err)
+ continue
+ }
+ procs = append(procs, p)
+ if err := p.Signal(s); err != nil {
+ logrus.Warn(err)
+ }
+ }
+ if err := m.Freeze(configs.Thawed); err != nil {
+ logrus.Warn(err)
+ }
+
+ for _, p := range procs {
+ if s != unix.SIGKILL {
+ if ok, err := isWaitable(p.Pid); err != nil {
+ if !isNoChildren(err) {
+ logrus.Warn("signalAllProcesses: ", p.Pid, err)
+ }
+ continue
+ } else if !ok {
+ // Not ready to report so don't wait
+ continue
+ }
+ }
+
+ if _, err := p.Wait(); err != nil {
+ if !isNoChildren(err) {
+ logrus.Warn("wait: ", err)
+ }
+ }
+ }
+ return nil
+}
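For clarity, each setupRlimits iteration amounts to a prlimit(2) call against the container's init pid. A standalone sketch using x/sys/unix directly (the pid and limit values are illustrative, and system.Prlimit is assumed here to be a thin wrapper over the same syscall):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Apply a soft/hard open-files limit to an already-running process,
	// just as setupRlimits does for each configs.Rlimit entry.
	initPid := 12345 // illustrative pid
	limit := unix.Rlimit{Cur: 1024, Max: 4096}
	if err := unix.Prlimit(initPid, unix.RLIMIT_NOFILE, &limit, nil); err != nil {
		fmt.Println("prlimit:", err)
	}
}
```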
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go b/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go
new file mode 100644
index 000000000..82ffa7a88
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go
@@ -0,0 +1,50 @@
+// +build linux
+
+package keys
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+type KeySerial uint32
+
+func JoinSessionKeyring(name string) (KeySerial, error) {
+ sessKeyId, err := unix.KeyctlJoinSessionKeyring(name)
+ if err != nil {
+ return 0, fmt.Errorf("could not create session key: %v", err)
+ }
+ return KeySerial(sessKeyId), nil
+}
+
+// ModKeyringPerm modifies permissions on a keyring by reading the current permissions,
+// ANDing the bits with the given mask (clearing permissions), and setting
+// additional permission bits.
+func ModKeyringPerm(ringId KeySerial, mask, setbits uint32) error {
+ dest, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(ringId))
+ if err != nil {
+ return err
+ }
+
+ res := strings.Split(string(dest), ";")
+ if len(res) < 5 {
+ return fmt.Errorf("Destination buffer for key description is too small")
+ }
+
+ // parse permissions
+ perm64, err := strconv.ParseUint(res[3], 16, 32)
+ if err != nil {
+ return err
+ }
+
+ perm := (uint32(perm64) & mask) | setbits
+
+ if err := unix.KeyctlSetperm(int(ringId), perm); err != nil {
+ return err
+ }
+
+ return nil
+}
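A usage sketch: joining a fresh session keyring and masking out the permission bits for "other" (the keyring name and bit mask are illustrative; in the kernel's keyring permission word, the lowest byte covers "other"):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/keys"
)

func main() {
	// Join (or create) a session keyring for this process.
	ringID, err := keys.JoinSessionKeyring("_ses_example")
	if err != nil {
		panic(err)
	}
	// Keep every permission bit except those granted to "other";
	// grant no additional bits.
	if err := keys.ModKeyringPerm(ringID, 0xffffff00, 0); err != nil {
		panic(err)
	}
	fmt.Printf("session keyring: %d\n", ringID)
}
```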
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go
new file mode 100644
index 000000000..8829b71ad
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go
@@ -0,0 +1,87 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "github.com/vishvananda/netlink/nl"
+ "golang.org/x/sys/unix"
+)
+
+// list of known message types we want to send to bootstrap program
+// The number is randomly chosen to not conflict with known netlink types
+const (
+ InitMsg uint16 = 62000
+ CloneFlagsAttr uint16 = 27281
+ NsPathsAttr uint16 = 27282
+ UidmapAttr uint16 = 27283
+ GidmapAttr uint16 = 27284
+ SetgroupAttr uint16 = 27285
+ OomScoreAdjAttr uint16 = 27286
+ RootlessAttr uint16 = 27287
+)
+
+type Int32msg struct {
+ Type uint16
+ Value uint32
+}
+
+// Serialize serializes the message.
+// Int32msg has the following representation
+// | nlattr len | nlattr type |
+// | uint32 value |
+func (msg *Int32msg) Serialize() []byte {
+ buf := make([]byte, msg.Len())
+ native := nl.NativeEndian()
+ native.PutUint16(buf[0:2], uint16(msg.Len()))
+ native.PutUint16(buf[2:4], msg.Type)
+ native.PutUint32(buf[4:8], msg.Value)
+ return buf
+}
+
+func (msg *Int32msg) Len() int {
+ return unix.NLA_HDRLEN + 4
+}
+
+// Bytemsg has the following representation
+// | nlattr len | nlattr type |
+// | value | pad |
+type Bytemsg struct {
+ Type uint16
+ Value []byte
+}
+
+func (msg *Bytemsg) Serialize() []byte {
+ l := msg.Len()
+ buf := make([]byte, (l+unix.NLA_ALIGNTO-1) & ^(unix.NLA_ALIGNTO-1))
+ native := nl.NativeEndian()
+ native.PutUint16(buf[0:2], uint16(l))
+ native.PutUint16(buf[2:4], msg.Type)
+ copy(buf[4:], msg.Value)
+ return buf
+}
+
+func (msg *Bytemsg) Len() int {
+ return unix.NLA_HDRLEN + len(msg.Value) + 1 // null-terminated
+}
+
+type Boolmsg struct {
+ Type uint16
+ Value bool
+}
+
+func (msg *Boolmsg) Serialize() []byte {
+ buf := make([]byte, msg.Len())
+ native := nl.NativeEndian()
+ native.PutUint16(buf[0:2], uint16(msg.Len()))
+ native.PutUint16(buf[2:4], msg.Type)
+ if msg.Value {
+ buf[4] = 1
+ } else {
+ buf[4] = 0
+ }
+ return buf
+}
+
+func (msg *Boolmsg) Len() int {
+ return unix.NLA_HDRLEN + 1
+}
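A sketch of what one serialized bootstrap attribute looks like (the flag combination is illustrative):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer"
	"golang.org/x/sys/unix"
)

func main() {
	// One nlattr carrying the clone flags for the bootstrap process:
	// | nlattr len | nlattr type | uint32 value |
	msg := &libcontainer.Int32msg{
		Type:  libcontainer.CloneFlagsAttr,
		Value: uint32(unix.CLONE_NEWPID | unix.CLONE_NEWNS),
	}
	buf := msg.Serialize()
	fmt.Printf("% x\n", buf) // 8 bytes: 4-byte nlattr header + 4-byte value
}
```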
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go
new file mode 100644
index 000000000..5075bee4d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go
@@ -0,0 +1,259 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/utils"
+ "github.com/vishvananda/netlink"
+)
+
+var strategies = map[string]networkStrategy{
+ "veth": &veth{},
+ "loopback": &loopback{},
+}
+
+// networkStrategy represents a specific network configuration for
+// a container's networking stack
+type networkStrategy interface {
+ create(*network, int) error
+ initialize(*network) error
+ detach(*configs.Network) error
+ attach(*configs.Network) error
+}
+
+// getStrategy returns the specific network strategy for the
+// provided type.
+func getStrategy(tpe string) (networkStrategy, error) {
+ s, exists := strategies[tpe]
+ if !exists {
+ return nil, fmt.Errorf("unknown strategy type %q", tpe)
+ }
+ return s, nil
+}
+
+// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
+func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) {
+ out := &NetworkInterface{Name: interfaceName}
+ // This can happen if the network runtime information is missing - possible if the
+ // container was created by an old version of libcontainer.
+ if interfaceName == "" {
+ return out, nil
+ }
+ type netStatsPair struct {
+ // Where to write the output.
+ Out *uint64
+ // The network stats file to read.
+ File string
+ }
+	// Ingress for the host veth is from the container. Hence the tx_bytes stat on the host veth is actually the number of bytes received by the container.
+ netStats := []netStatsPair{
+ {Out: &out.RxBytes, File: "tx_bytes"},
+ {Out: &out.RxPackets, File: "tx_packets"},
+ {Out: &out.RxErrors, File: "tx_errors"},
+ {Out: &out.RxDropped, File: "tx_dropped"},
+
+ {Out: &out.TxBytes, File: "rx_bytes"},
+ {Out: &out.TxPackets, File: "rx_packets"},
+ {Out: &out.TxErrors, File: "rx_errors"},
+ {Out: &out.TxDropped, File: "rx_dropped"},
+ }
+ for _, netStat := range netStats {
+ data, err := readSysfsNetworkStats(interfaceName, netStat.File)
+ if err != nil {
+ return nil, err
+ }
+ *(netStat.Out) = data
+ }
+ return out, nil
+}
+
+// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
+func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// loopback is a network strategy that provides a basic loopback device
+type loopback struct {
+}
+
+func (l *loopback) create(n *network, nspid int) error {
+ return nil
+}
+
+func (l *loopback) initialize(config *network) error {
+ return netlink.LinkSetUp(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "lo"}})
+}
+
+func (l *loopback) attach(n *configs.Network) (err error) {
+ return nil
+}
+
+func (l *loopback) detach(n *configs.Network) (err error) {
+ return nil
+}
+
+// veth is a network strategy that uses a bridge and creates
+// a veth pair, one that is attached to the bridge on the host and the other
+// is placed inside the container's namespace
+type veth struct {
+}
+
+func (v *veth) detach(n *configs.Network) (err error) {
+ return netlink.LinkSetMaster(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: n.HostInterfaceName}}, nil)
+}
+
+// attach a container network interface to an external network
+func (v *veth) attach(n *configs.Network) (err error) {
+ brl, err := netlink.LinkByName(n.Bridge)
+ if err != nil {
+ return err
+ }
+ br, ok := brl.(*netlink.Bridge)
+ if !ok {
+ return fmt.Errorf("Wrong device type %T", brl)
+ }
+ host, err := netlink.LinkByName(n.HostInterfaceName)
+ if err != nil {
+ return err
+ }
+
+ if err := netlink.LinkSetMaster(host, br); err != nil {
+ return err
+ }
+ if err := netlink.LinkSetMTU(host, n.Mtu); err != nil {
+ return err
+ }
+ if n.HairpinMode {
+ if err := netlink.LinkSetHairpin(host, true); err != nil {
+ return err
+ }
+ }
+ if err := netlink.LinkSetUp(host); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (v *veth) create(n *network, nspid int) (err error) {
+ tmpName, err := v.generateTempPeerName()
+ if err != nil {
+ return err
+ }
+ n.TempVethPeerName = tmpName
+ if n.Bridge == "" {
+ return fmt.Errorf("bridge is not specified")
+ }
+ veth := &netlink.Veth{
+ LinkAttrs: netlink.LinkAttrs{
+ Name: n.HostInterfaceName,
+ TxQLen: n.TxQueueLen,
+ },
+ PeerName: n.TempVethPeerName,
+ }
+ if err := netlink.LinkAdd(veth); err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ netlink.LinkDel(veth)
+ }
+ }()
+ if err := v.attach(&n.Network); err != nil {
+ return err
+ }
+ child, err := netlink.LinkByName(n.TempVethPeerName)
+ if err != nil {
+ return err
+ }
+ return netlink.LinkSetNsPid(child, nspid)
+}
+
+func (v *veth) generateTempPeerName() (string, error) {
+ return utils.GenerateRandomName("veth", 7)
+}
+
+func (v *veth) initialize(config *network) error {
+ peer := config.TempVethPeerName
+ if peer == "" {
+ return fmt.Errorf("peer is not specified")
+ }
+ child, err := netlink.LinkByName(peer)
+ if err != nil {
+ return err
+ }
+ if err := netlink.LinkSetDown(child); err != nil {
+ return err
+ }
+ if err := netlink.LinkSetName(child, config.Name); err != nil {
+ return err
+ }
+ // get the interface again after we changed the name as the index also changes.
+ if child, err = netlink.LinkByName(config.Name); err != nil {
+ return err
+ }
+ if config.MacAddress != "" {
+ mac, err := net.ParseMAC(config.MacAddress)
+ if err != nil {
+ return err
+ }
+ if err := netlink.LinkSetHardwareAddr(child, mac); err != nil {
+ return err
+ }
+ }
+ ip, err := netlink.ParseAddr(config.Address)
+ if err != nil {
+ return err
+ }
+ if err := netlink.AddrAdd(child, ip); err != nil {
+ return err
+ }
+ if config.IPv6Address != "" {
+ ip6, err := netlink.ParseAddr(config.IPv6Address)
+ if err != nil {
+ return err
+ }
+ if err := netlink.AddrAdd(child, ip6); err != nil {
+ return err
+ }
+ }
+ if err := netlink.LinkSetMTU(child, config.Mtu); err != nil {
+ return err
+ }
+ if err := netlink.LinkSetUp(child); err != nil {
+ return err
+ }
+ if config.Gateway != "" {
+ gw := net.ParseIP(config.Gateway)
+ if err := netlink.RouteAdd(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: child.Attrs().Index,
+ Gw: gw,
+ }); err != nil {
+ return err
+ }
+ }
+ if config.IPv6Gateway != "" {
+ gw := net.ParseIP(config.IPv6Gateway)
+ if err := netlink.RouteAdd(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: child.Attrs().Index,
+ Gw: gw,
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
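The sysfs read in readSysfsNetworkStats, spelled out for a single counter; note that the rx/tx swap above applies only when the interface is the host side of a veth pair ("eth0" is illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

func main() {
	// Each statistic is a single decimal number in its own sysfs file.
	data, err := ioutil.ReadFile("/sys/class/net/eth0/statistics/rx_bytes")
	if err != nil {
		panic(err)
	}
	n, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("eth0 rx_bytes=%d\n", n)
}
```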
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go
new file mode 100644
index 000000000..81587b9b1
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go
@@ -0,0 +1,90 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/sys/unix"
+)
+
+const oomCgroupName = "memory"
+
+type PressureLevel uint
+
+const (
+ LowPressure PressureLevel = iota
+ MediumPressure
+ CriticalPressure
+)
+
+func registerMemoryEvent(cgDir string, evName string, arg string) (<-chan struct{}, error) {
+ evFile, err := os.Open(filepath.Join(cgDir, evName))
+ if err != nil {
+ return nil, err
+ }
+ fd, err := unix.Eventfd(0, unix.EFD_CLOEXEC)
+ if err != nil {
+ evFile.Close()
+ return nil, err
+ }
+
+ eventfd := os.NewFile(uintptr(fd), "eventfd")
+
+ eventControlPath := filepath.Join(cgDir, "cgroup.event_control")
+ data := fmt.Sprintf("%d %d %s", eventfd.Fd(), evFile.Fd(), arg)
+ if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil {
+ eventfd.Close()
+ evFile.Close()
+ return nil, err
+ }
+ ch := make(chan struct{})
+ go func() {
+ defer func() {
+ close(ch)
+ eventfd.Close()
+ evFile.Close()
+ }()
+ buf := make([]byte, 8)
+ for {
+ if _, err := eventfd.Read(buf); err != nil {
+ return
+ }
+ // When a cgroup is destroyed, an event is sent to eventfd.
+ // So if the control path is gone, return instead of notifying.
+ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
+ return
+ }
+ ch <- struct{}{}
+ }
+ }()
+ return ch, nil
+}
+
+// notifyOnOOM returns a channel on which you can expect an event about OOM;
+// if the process dies without an OOM, this channel will be closed.
+func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
+ dir := paths[oomCgroupName]
+ if dir == "" {
+ return nil, fmt.Errorf("path %q missing", oomCgroupName)
+ }
+
+ return registerMemoryEvent(dir, "memory.oom_control", "")
+}
+
+func notifyMemoryPressure(paths map[string]string, level PressureLevel) (<-chan struct{}, error) {
+ dir := paths[oomCgroupName]
+ if dir == "" {
+ return nil, fmt.Errorf("path %q missing", oomCgroupName)
+ }
+
+ if level > CriticalPressure {
+ return nil, fmt.Errorf("invalid pressure level %d", level)
+ }
+
+ levelStr := []string{"low", "medium", "critical"}[level]
+ return registerMemoryEvent(dir, "memory.pressure_level", levelStr)
+}
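+
+// Usage sketch (illustrative): wait for OOM notifications on a container's
+// memory cgroup, given the same paths map used above.
+//
+//	oom, err := notifyOnOOM(paths)
+//	if err != nil {
+//		return err
+//	}
+//	for range oom {
+//		// The container hit an OOM condition; the channel is closed
+//		// when the cgroup (and its event_control file) goes away.
+//	}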
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
new file mode 100644
index 000000000..575701371
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
@@ -0,0 +1,44 @@
+## nsenter
+
+The `nsenter` package registers a special init constructor that is called before
+the Go runtime has a chance to boot. This gives us the ability to `setns` into
+existing namespaces while avoiding the issues the Go runtime has with multiple
+threads. The constructor runs whenever this package is imported into your Go
+application.
+
+The `nsenter` package uses [cgo](https://golang.org/cmd/cgo/) via `import "C"`.
+In cgo, if the import of "C" is immediately preceded by a comment, that comment,
+called the preamble, is used as a header when compiling the C parts of the
+package. As a result, importing package `nsenter` causes the C function
+`nsexec()` to be called as a constructor. Package `nsenter` is currently only
+imported in `main_unix.go`, so on Linux that C code runs before every call to
+`cmd.Start`.
+
+Because `nsexec()` must run before the Go runtime in order to use Linux
+kernel namespaces, you must `import` this library into a package if
+you plan to use `libcontainer` directly. Otherwise Go will not execute
+the `nsexec()` constructor, which means that the re-exec will not cause
+the namespaces to be joined. You can import it like this:
+
+```go
+import _ "github.com/opencontainers/runc/libcontainer/nsenter"
+```
+
+`nsexec()` will first get the file descriptor number for the init pipe
+from the environment variable `_LIBCONTAINER_INITPIPE` (which was opened
+by the parent and kept open across the fork-exec of the `nsexec()` init
+process). The init pipe is used to read bootstrap data (namespace paths,
+clone flags, uid and gid mappings, and the console path) from the parent
+process. `nsexec()` will then call `setns(2)` to join the namespaces
+provided in the bootstrap data (if available), `clone(2)` a child process
+with the provided clone flags, update the user and group ID mappings, do
+some further miscellaneous setup steps, and then send the PID of the
+child process to the parent of the `nsexec()` "caller". Finally,
+the parent `nsexec()` will exit and the child `nsexec()` process will
+return to allow the Go runtime to take over.
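+
+For illustration, the parent side might look roughly like this (a minimal
+sketch: runc's real init pipe is a socketpair set up by its process-handling
+code, but the idea is the same):
+
+```go
+// Re-exec ourselves and hand the child an init pipe.
+r, w, err := os.Pipe()
+if err != nil {
+	panic(err)
+}
+cmd := exec.Command("/proc/self/exe", "init")
+cmd.ExtraFiles = []*os.File{r} // ExtraFiles are numbered from fd 3 in the child
+cmd.Env = append(os.Environ(), "_LIBCONTAINER_INITPIPE=3")
+if err := cmd.Start(); err != nil {
+	panic(err)
+}
+r.Close() // the parent keeps w to write the bootstrap data
+```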
+
+NOTE: We do both `setns(2)` and `clone(2)` even if we don't have any
+CLONE_NEW* clone flags because we must fork a new process in order to
+enter the PID namespace.
+
+
+
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h
new file mode 100644
index 000000000..9e9bdca05
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h
@@ -0,0 +1,32 @@
+#ifndef NSENTER_NAMESPACE_H
+#define NSENTER_NAMESPACE_H
+
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+#include <sched.h>
+
+/* All of these are taken from include/uapi/linux/sched.h */
+#ifndef CLONE_NEWNS
+# define CLONE_NEWNS 0x00020000 /* New mount namespace group */
+#endif
+#ifndef CLONE_NEWCGROUP
+# define CLONE_NEWCGROUP 0x02000000 /* New cgroup namespace */
+#endif
+#ifndef CLONE_NEWUTS
+# define CLONE_NEWUTS 0x04000000 /* New utsname namespace */
+#endif
+#ifndef CLONE_NEWIPC
+# define CLONE_NEWIPC 0x08000000 /* New ipc namespace */
+#endif
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000 /* New user namespace */
+#endif
+#ifndef CLONE_NEWPID
+# define CLONE_NEWPID 0x20000000 /* New pid namespace */
+#endif
+#ifndef CLONE_NEWNET
+# define CLONE_NEWNET 0x40000000 /* New network namespace */
+#endif
+
+#endif /* NSENTER_NAMESPACE_H */
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
new file mode 100644
index 000000000..07f4d63e4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
@@ -0,0 +1,12 @@
+// +build linux,!gccgo
+
+package nsenter
+
+/*
+#cgo CFLAGS: -Wall
+extern void nsexec();
+void __attribute__((constructor)) init(void) {
+ nsexec();
+}
+*/
+import "C"
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
new file mode 100644
index 000000000..63c7a3ec2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
@@ -0,0 +1,25 @@
+// +build linux,gccgo
+
+package nsenter
+
+/*
+#cgo CFLAGS: -Wall
+extern void nsexec();
+void __attribute__((constructor)) init(void) {
+ nsexec();
+}
+*/
+import "C"
+
+// AlwaysFalse is here to stay false
+// (and be exported so the compiler doesn't optimize out its reference)
+var AlwaysFalse bool
+
+func init() {
+ if AlwaysFalse {
+		// Referencing this C init() in a no-op test ensures that the compiler
+		// links in the C function.
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
+ C.init()
+ }
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go
new file mode 100644
index 000000000..ac701ca39
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go
@@ -0,0 +1,5 @@
+// +build !linux !cgo
+
+package nsenter
+
+import "C"
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
new file mode 100644
index 000000000..197e6d08e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
@@ -0,0 +1,862 @@
+#define _GNU_SOURCE
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <sched.h>
+#include <setjmp.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <linux/limits.h>
+#include <linux/netlink.h>
+#include <linux/types.h>
+
+/* Get all of the CLONE_NEW* flags. */
+#include "namespace.h"
+
+/* Synchronisation values. */
+enum sync_t {
+ SYNC_USERMAP_PLS = 0x40, /* Request parent to map our users. */
+ SYNC_USERMAP_ACK = 0x41, /* Mapping finished by the parent. */
+ SYNC_RECVPID_PLS = 0x42, /* Tell parent we're sending the PID. */
+ SYNC_RECVPID_ACK = 0x43, /* PID was correctly received by parent. */
+ SYNC_GRANDCHILD = 0x44, /* The grandchild is ready to run. */
+ SYNC_CHILD_READY = 0x45, /* The child or grandchild is ready to return. */
+
+ /* XXX: This doesn't help with segfaults and other such issues. */
+ SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */
+};
+
+/* longjmp() arguments. */
+#define JUMP_PARENT 0x00
+#define JUMP_CHILD 0xA0
+#define JUMP_INIT 0xA1
+
+/* JSON buffer. */
+#define JSON_MAX 4096
+
+/* Assume the stack grows down, so arguments should be above it. */
+struct clone_t {
+ /*
+ * Reserve some space for clone() to locate arguments
+ * and retcode in this place
+ */
+ char stack[4096] __attribute__ ((aligned(16)));
+ char stack_ptr[0];
+
+	/* There are two children. This is used to execute the different code. */
+ jmp_buf *env;
+ int jmpval;
+};
+
+struct nlconfig_t {
+ char *data;
+ uint32_t cloneflags;
+ char *uidmap;
+ size_t uidmap_len;
+ char *gidmap;
+ size_t gidmap_len;
+ char *namespaces;
+ size_t namespaces_len;
+ uint8_t is_setgroup;
+ uint8_t is_rootless;
+ char *oom_score_adj;
+ size_t oom_score_adj_len;
+};
+
+/*
+ * List of netlink message types sent to us as part of bootstrapping the init.
+ * These constants are defined in libcontainer/message_linux.go.
+ */
+#define INIT_MSG 62000
+#define CLONE_FLAGS_ATTR 27281
+#define NS_PATHS_ATTR 27282
+#define UIDMAP_ATTR 27283
+#define GIDMAP_ATTR 27284
+#define SETGROUP_ATTR 27285
+#define OOM_SCORE_ADJ_ATTR 27286
+#define ROOTLESS_ATTR 27287
+
+/*
+ * Use the raw syscall for versions of glibc which don't include a function for
+ * it, namely glibc 2.12.
+ */
+#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
+# define _GNU_SOURCE
+# include "syscall.h"
+# if !defined(SYS_setns) && defined(__NR_setns)
+# define SYS_setns __NR_setns
+# endif
+
+#ifndef SYS_setns
+# error "setns(2) syscall not supported by glibc version"
+#endif
+
+int setns(int fd, int nstype)
+{
+ return syscall(SYS_setns, fd, nstype);
+}
+#endif
+
+/* XXX: This is ugly. */
+static int syncfd = -1;
+
+/* TODO(cyphar): Fix this so it correctly deals with syncT. */
+#define bail(fmt, ...) \
+ do { \
+ int ret = __COUNTER__ + 1; \
+ fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \
+ if (syncfd >= 0) { \
+ enum sync_t s = SYNC_ERR; \
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s)) \
+ fprintf(stderr, "nsenter: failed: write(s)"); \
+ if (write(syncfd, &ret, sizeof(ret)) != sizeof(ret)) \
+ fprintf(stderr, "nsenter: failed: write(ret)"); \
+ } \
+ exit(ret); \
+ } while(0)
+
+static int write_file(char *data, size_t data_len, char *pathfmt, ...)
+{
+ int fd, len, ret = 0;
+ char path[PATH_MAX];
+
+ va_list ap;
+ va_start(ap, pathfmt);
+ len = vsnprintf(path, PATH_MAX, pathfmt, ap);
+ va_end(ap);
+ if (len < 0)
+ return -1;
+
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ return -1;
+ }
+
+ len = write(fd, data, data_len);
+ if (len != data_len) {
+ ret = -1;
+ goto out;
+ }
+
+out:
+ close(fd);
+ return ret;
+}
+
+enum policy_t {
+ SETGROUPS_DEFAULT = 0,
+ SETGROUPS_ALLOW,
+ SETGROUPS_DENY,
+};
+
+/* This *must* be called before we touch gid_map. */
+static void update_setgroups(int pid, enum policy_t setgroup)
+{
+ char *policy;
+
+ switch (setgroup) {
+ case SETGROUPS_ALLOW:
+ policy = "allow";
+ break;
+ case SETGROUPS_DENY:
+ policy = "deny";
+ break;
+ case SETGROUPS_DEFAULT:
+ default:
+ /* Nothing to do. */
+ return;
+ }
+
+ if (write_file(policy, strlen(policy), "/proc/%d/setgroups", pid) < 0) {
+ /*
+ * If the kernel is too old to support /proc/pid/setgroups,
+ * open(2) or write(2) will return ENOENT. This is fine.
+ */
+ if (errno != ENOENT)
+ bail("failed to write '%s' to /proc/%d/setgroups", policy, pid);
+ }
+}
+
+static void update_uidmap(int pid, char *map, size_t map_len)
+{
+ if (map == NULL || map_len <= 0)
+ return;
+
+ if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0)
+ bail("failed to update /proc/%d/uid_map", pid);
+}
+
+static void update_gidmap(int pid, char *map, size_t map_len)
+{
+ if (map == NULL || map_len <= 0)
+ return;
+
+ if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0)
+ bail("failed to update /proc/%d/gid_map", pid);
+}
+
+static void update_oom_score_adj(char *data, size_t len)
+{
+ if (data == NULL || len <= 0)
+ return;
+
+ if (write_file(data, len, "/proc/self/oom_score_adj") < 0)
+ bail("failed to update /proc/self/oom_score_adj");
+}
+
+/* A dummy function that just jumps to the given jumpval. */
+static int child_func(void *arg) __attribute__ ((noinline));
+static int child_func(void *arg)
+{
+ struct clone_t *ca = (struct clone_t *)arg;
+ longjmp(*ca->env, ca->jmpval);
+}
+
+static int clone_parent(jmp_buf *env, int jmpval) __attribute__ ((noinline));
+static int clone_parent(jmp_buf *env, int jmpval)
+{
+ struct clone_t ca = {
+ .env = env,
+ .jmpval = jmpval,
+ };
+
+ return clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD, &ca);
+}
+
+/*
+ * Gets the init pipe fd from the environment, which is used to read the
+ * bootstrap data and tell the parent what the new pid is after we finish
+ * setting up the environment.
+ */
+static int initpipe(void)
+{
+ int pipenum;
+ char *initpipe, *endptr;
+
+ initpipe = getenv("_LIBCONTAINER_INITPIPE");
+ if (initpipe == NULL || *initpipe == '\0')
+ return -1;
+
+ pipenum = strtol(initpipe, &endptr, 10);
+ if (*endptr != '\0')
+ bail("unable to parse _LIBCONTAINER_INITPIPE");
+
+ return pipenum;
+}
+
+/* Returns the clone(2) flag for a namespace, given the name of a namespace. */
+static int nsflag(char *name)
+{
+ if (!strcmp(name, "cgroup"))
+ return CLONE_NEWCGROUP;
+ else if (!strcmp(name, "ipc"))
+ return CLONE_NEWIPC;
+ else if (!strcmp(name, "mnt"))
+ return CLONE_NEWNS;
+ else if (!strcmp(name, "net"))
+ return CLONE_NEWNET;
+ else if (!strcmp(name, "pid"))
+ return CLONE_NEWPID;
+ else if (!strcmp(name, "user"))
+ return CLONE_NEWUSER;
+ else if (!strcmp(name, "uts"))
+ return CLONE_NEWUTS;
+
+	/* If we don't recognise a name, fall back to 0. */
+ return 0;
+}
+
+static uint32_t readint32(char *buf)
+{
+ return *(uint32_t *) buf;
+}
+
+static uint8_t readint8(char *buf)
+{
+ return *(uint8_t *) buf;
+}
+
+static void nl_parse(int fd, struct nlconfig_t *config)
+{
+ size_t len, size;
+ struct nlmsghdr hdr;
+ char *data, *current;
+
+ /* Retrieve the netlink header. */
+ len = read(fd, &hdr, NLMSG_HDRLEN);
+ if (len != NLMSG_HDRLEN)
+ bail("invalid netlink header length %zu", len);
+
+ if (hdr.nlmsg_type == NLMSG_ERROR)
+ bail("failed to read netlink message");
+
+ if (hdr.nlmsg_type != INIT_MSG)
+ bail("unexpected msg type %d", hdr.nlmsg_type);
+
+ /* Retrieve data. */
+ size = NLMSG_PAYLOAD(&hdr, 0);
+ current = data = malloc(size);
+ if (!data)
+ bail("failed to allocate %zu bytes of memory for nl_payload", size);
+
+ len = read(fd, data, size);
+ if (len != size)
+ bail("failed to read netlink payload, %zu != %zu", len, size);
+
+ /* Parse the netlink payload. */
+ config->data = data;
+ while (current < data + size) {
+ struct nlattr *nlattr = (struct nlattr *)current;
+ size_t payload_len = nlattr->nla_len - NLA_HDRLEN;
+
+ /* Advance to payload. */
+ current += NLA_HDRLEN;
+
+ /* Handle payload. */
+ switch (nlattr->nla_type) {
+ case CLONE_FLAGS_ATTR:
+ config->cloneflags = readint32(current);
+ break;
+ case ROOTLESS_ATTR:
+ config->is_rootless = readint8(current);
+ break;
+ case OOM_SCORE_ADJ_ATTR:
+ config->oom_score_adj = current;
+ config->oom_score_adj_len = payload_len;
+ break;
+ case NS_PATHS_ATTR:
+ config->namespaces = current;
+ config->namespaces_len = payload_len;
+ break;
+ case UIDMAP_ATTR:
+ config->uidmap = current;
+ config->uidmap_len = payload_len;
+ break;
+ case GIDMAP_ATTR:
+ config->gidmap = current;
+ config->gidmap_len = payload_len;
+ break;
+ case SETGROUP_ATTR:
+ config->is_setgroup = readint8(current);
+ break;
+ default:
+ bail("unknown netlink message type %d", nlattr->nla_type);
+ }
+
+ current += NLA_ALIGN(payload_len);
+ }
+}
+
+void nl_free(struct nlconfig_t *config)
+{
+ free(config->data);
+}
+
+void join_namespaces(char *nslist)
+{
+ int num = 0, i;
+ char *saveptr = NULL;
+ char *namespace = strtok_r(nslist, ",", &saveptr);
+ struct namespace_t {
+ int fd;
+ int ns;
+ char type[PATH_MAX];
+ char path[PATH_MAX];
+ } *namespaces = NULL;
+
+ if (!namespace || !strlen(namespace) || !strlen(nslist))
+ bail("ns paths are empty");
+
+ /*
+ * We have to open the file descriptors first, since after
+ * we join the mnt namespace we might no longer be able to
+ * access the paths.
+ */
+ do {
+ int fd;
+ char *path;
+ struct namespace_t *ns;
+
+ /* Resize the namespace array. */
+ namespaces = realloc(namespaces, ++num * sizeof(struct namespace_t));
+ if (!namespaces)
+ bail("failed to reallocate namespace array");
+ ns = &namespaces[num - 1];
+
+ /* Split 'ns:path'. */
+ path = strstr(namespace, ":");
+ if (!path)
+ bail("failed to parse %s", namespace);
+ *path++ = '\0';
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ bail("failed to open %s", path);
+
+ ns->fd = fd;
+ ns->ns = nsflag(namespace);
+ strncpy(ns->path, path, PATH_MAX);
+ } while ((namespace = strtok_r(NULL, ",", &saveptr)) != NULL);
+
+ /*
+ * The ordering in which we join namespaces is important. We should
+ * always join the user namespace *first*. This is all guaranteed
+ * from the container_linux.go side of this, so we're just going to
+ * follow the order given to us.
+ */
+
+ for (i = 0; i < num; i++) {
+ struct namespace_t ns = namespaces[i];
+
+ if (setns(ns.fd, ns.ns) < 0)
+ bail("failed to setns to %s", ns.path);
+
+ close(ns.fd);
+ }
+
+ free(namespaces);
+}
+
+void nsexec(void)
+{
+ int pipenum;
+ jmp_buf env;
+ int sync_child_pipe[2], sync_grandchild_pipe[2];
+ struct nlconfig_t config = {0};
+
+ /*
+	 * If we don't have an init pipe, just return to the Go runtime.
+ * We'll only get an init pipe for start or exec.
+ */
+ pipenum = initpipe();
+ if (pipenum == -1)
+ return;
+
+ /* Parse all of the netlink configuration. */
+ nl_parse(pipenum, &config);
+
+ /* Set oom_score_adj. This has to be done before !dumpable because
+	 * /proc/self/oom_score_adj is not writeable unless you're a privileged
+ * user (if !dumpable is set). All children inherit their parent's
+ * oom_score_adj value on fork(2) so this will always be propagated
+ * properly.
+ */
+ update_oom_score_adj(config.oom_score_adj, config.oom_score_adj_len);
+
+ /*
+ * Make the process non-dumpable, to avoid various race conditions that
+ * could cause processes in namespaces we're joining to access host
+ * resources (or potentially execute code).
+ *
+ * However, if the number of namespaces we are joining is 0, we are not
+ * going to be switching to a different security context. Thus setting
+ * ourselves to be non-dumpable only breaks things (like rootless
+ * containers), which is the recommendation from the kernel folks.
+ */
+ if (config.namespaces) {
+ if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0)
+ bail("failed to set process as non-dumpable");
+ }
+
+ /* Pipe so we can tell the child when we've finished setting up. */
+ if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_child_pipe) < 0)
+ bail("failed to setup sync pipe between parent and child");
+
+ /*
+	 * We need a new socketpair to sync with the grandchild so we don't have
+	 * a race condition with the child.
+ */
+ if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_grandchild_pipe) < 0)
+ bail("failed to setup sync pipe between parent and grandchild");
+
+ /* TODO: Currently we aren't dealing with child deaths properly. */
+
+ /*
+ * Okay, so this is quite annoying.
+ *
+ * In order for this unsharing code to be more extensible we need to split
+ * up unshare(CLONE_NEWUSER) and clone() in various ways. The ideal case
+ * would be if we did clone(CLONE_NEWUSER) and the other namespaces
+ * separately, but because of SELinux issues we cannot really do that. But
+ * we cannot just dump the namespace flags into clone(...) because several
+ * usecases (such as rootless containers) require more granularity around
+ * the namespace setup. In addition, some older kernels had issues where
+ * CLONE_NEWUSER wasn't handled before other namespaces (but we cannot
+ * handle this while also dealing with SELinux so we choose SELinux support
+ * over broken kernel support).
+ *
+ * However, if we unshare(2) the user namespace *before* we clone(2), then
+ * all hell breaks loose.
+ *
+ * The parent no longer has permissions to do many things (unshare(2) drops
+ * all capabilities in your old namespace), and the container cannot be set
+ * up to have more than one {uid,gid} mapping. This is obviously less than
+ * ideal. In order to fix this, we have to first clone(2) and then unshare.
+ *
+ * Unfortunately, it's not as simple as that. We have to fork to enter the
+ * PID namespace (the PID namespace only applies to children). Since we'll
+ * have to double-fork, this clone_parent() call won't be able to get the
+ * PID of the _actual_ init process (without doing more synchronisation than
+ * I can deal with at the moment). So we'll just get the parent to send it
+	 * for us; the only job of this process is to update
+ * /proc/pid/{setgroups,uid_map,gid_map}.
+ *
+ * And as a result of the above, we also need to setns(2) in the first child
+ * because if we join a PID namespace in the topmost parent then our child
+ * will be in that namespace (and it will not be able to give us a PID value
+ * that makes sense without resorting to sending things with cmsg).
+ *
+ * This also deals with an older issue caused by dumping cloneflags into
+ * clone(2): On old kernels, CLONE_PARENT didn't work with CLONE_NEWPID, so
+ * we have to unshare(2) before clone(2) in order to do this. This was fixed
+ * in upstream commit 1f7f4dde5c945f41a7abc2285be43d918029ecc5, and was
+ * introduced by 40a0d32d1eaffe6aac7324ca92604b6b3977eb0e. As far as we're
+ * aware, the last mainline kernel which had this bug was Linux 3.12.
+ * However, we cannot comment on which kernels the broken patch was
+ * backported to.
+ *
+ * -- Aleksa "what has my life come to?" Sarai
+ */
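+
+	/*
+	 * A rough sketch of the resulting process tree (see the stage comments
+	 * below):
+	 *
+	 *   stage 0 (JUMP_PARENT): writes the user mappings, relays the final PID
+	 *    `-- stage 1 (JUMP_CHILD): setns(2) + unshare(2), then clones stage 2
+	 *         `-- stage 2 (JUMP_INIT): the only process that returns to Go
+	 */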
+
+ switch (setjmp(env)) {
+ /*
+ * Stage 0: We're in the parent. Our job is just to create a new child
+ * (stage 1: JUMP_CHILD) process and write its uid_map and
+ * gid_map. That process will go on to create a new process, then
+ * it will send us its PID which we will send to the bootstrap
+ * process.
+ */
+ case JUMP_PARENT: {
+ int len;
+ pid_t child;
+ char buf[JSON_MAX];
+ bool ready = false;
+
+ /* For debugging. */
+ prctl(PR_SET_NAME, (unsigned long) "runc:[0:PARENT]", 0, 0, 0);
+
+ /* Start the process of getting a container. */
+ child = clone_parent(&env, JUMP_CHILD);
+ if (child < 0)
+ bail("unable to fork: child_func");
+
+ /*
+ * State machine for synchronisation with the children.
+ *
+		 * The parent only returns when both the child and the grandchild
+		 * are ready, so we can receive all possible error codes
+		 * generated by the children.
+ */
+ while (!ready) {
+ enum sync_t s;
+ int ret;
+
+ syncfd = sync_child_pipe[1];
+ close(sync_child_pipe[0]);
+
+ if (read(syncfd, &s, sizeof(s)) != sizeof(s))
+ bail("failed to sync with child: next state");
+
+ switch (s) {
+ case SYNC_ERR:
+ /* We have to mirror the error code of the child. */
+ if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret))
+ bail("failed to sync with child: read(error code)");
+
+ exit(ret);
+ case SYNC_USERMAP_PLS:
+ /*
+ * Enable setgroups(2) if we've been asked to. But we also
+ * have to explicitly disable setgroups(2) if we're
+ * creating a rootless container (this is required since
+ * Linux 3.19).
+ */
+ if (config.is_rootless && config.is_setgroup) {
+ kill(child, SIGKILL);
+ bail("cannot allow setgroup in an unprivileged user namespace setup");
+ }
+
+ if (config.is_setgroup)
+ update_setgroups(child, SETGROUPS_ALLOW);
+ if (config.is_rootless)
+ update_setgroups(child, SETGROUPS_DENY);
+
+ /* Set up mappings. */
+ update_uidmap(child, config.uidmap, config.uidmap_len);
+ update_gidmap(child, config.gidmap, config.gidmap_len);
+
+ s = SYNC_USERMAP_ACK;
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
+ kill(child, SIGKILL);
+ bail("failed to sync with child: write(SYNC_USERMAP_ACK)");
+ }
+ break;
+ case SYNC_RECVPID_PLS: {
+ pid_t old = child;
+
+ /* Get the init_func pid. */
+ if (read(syncfd, &child, sizeof(child)) != sizeof(child)) {
+ kill(old, SIGKILL);
+ bail("failed to sync with child: read(childpid)");
+ }
+
+ /* Send ACK. */
+ s = SYNC_RECVPID_ACK;
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
+ kill(old, SIGKILL);
+ kill(child, SIGKILL);
+ bail("failed to sync with child: write(SYNC_RECVPID_ACK)");
+ }
+ }
+ break;
+ case SYNC_CHILD_READY:
+ ready = true;
+ break;
+ default:
+ bail("unexpected sync value: %u", s);
+ }
+ }
+
+ /* Now sync with grandchild. */
+
+ ready = false;
+ while (!ready) {
+ enum sync_t s;
+ int ret;
+
+ syncfd = sync_grandchild_pipe[1];
+ close(sync_grandchild_pipe[0]);
+
+ s = SYNC_GRANDCHILD;
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
+ kill(child, SIGKILL);
+ bail("failed to sync with child: write(SYNC_GRANDCHILD)");
+ }
+
+ if (read(syncfd, &s, sizeof(s)) != sizeof(s))
+ bail("failed to sync with child: next state");
+
+ switch (s) {
+ case SYNC_ERR:
+ /* We have to mirror the error code of the child. */
+ if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret))
+ bail("failed to sync with child: read(error code)");
+
+ exit(ret);
+ case SYNC_CHILD_READY:
+ ready = true;
+ break;
+ default:
+ bail("unexpected sync value: %u", s);
+ }
+ }
+
+ /* Send the init_func pid back to our parent. */
+ len = snprintf(buf, JSON_MAX, "{\"pid\": %d}\n", child);
+ if (len < 0) {
+ kill(child, SIGKILL);
+ bail("unable to generate JSON for child pid");
+ }
+ if (write(pipenum, buf, len) != len) {
+ kill(child, SIGKILL);
+ bail("unable to send child pid to bootstrapper");
+ }
+
+ exit(0);
+ }
+
+ /*
+ * Stage 1: We're in the first child process. Our job is to join any
+ * provided namespaces in the netlink payload and unshare all
+ * of the requested namespaces. If we've been asked to
+ * CLONE_NEWUSER, we will ask our parent (stage 0) to set up
+ * our user mappings for us. Then, we create a new child
+ * (stage 2: JUMP_INIT) for PID namespace. We then send the
+ * child's PID to our parent (stage 0).
+ */
+ case JUMP_CHILD: {
+ pid_t child;
+ enum sync_t s;
+
+ /* We're in a child and thus need to tell the parent if we die. */
+ syncfd = sync_child_pipe[0];
+ close(sync_child_pipe[1]);
+
+ /* For debugging. */
+ prctl(PR_SET_NAME, (unsigned long) "runc:[1:CHILD]", 0, 0, 0);
+
+ /*
+		 * We need to setns first. We cannot do this earlier (in stage 0)
+		 * because we forked to get here: if stage 0 joined the PID namespace,
+		 * the PID of [stage 2: JUMP_INIT] would be meaningless. We could send
+		 * it using cmsg(3), but that's just annoying.
+ */
+ if (config.namespaces)
+ join_namespaces(config.namespaces);
+
+ /*
+ * Unshare all of the namespaces. Now, it should be noted that this
+ * ordering might break in the future (especially with rootless
+ * containers). But for now, it's not possible to split this into
+ * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues.
+ *
+ * Note that we don't merge this with clone() because there were
+ * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID)
+ * was broken, so we'll just do it the long way anyway.
+ */
+ if (unshare(config.cloneflags) < 0)
+ bail("failed to unshare namespaces");
+
+ /*
+ * Deal with user namespaces first. They are quite special, as they
+ * affect our ability to unshare other namespaces and are used as
+ * context for privilege checks.
+ */
+ if (config.cloneflags & CLONE_NEWUSER) {
+ /*
+ * We don't have the privileges to do any mapping here (see the
+ * clone_parent rant). So signal our parent to hook us up.
+ */
+
+ /* Switching is only necessary if we joined namespaces. */
+ if (config.namespaces) {
+ if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) < 0)
+ bail("failed to set process as dumpable");
+ }
+ s = SYNC_USERMAP_PLS;
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s))
+ bail("failed to sync with parent: write(SYNC_USERMAP_PLS)");
+
+ /* ... wait for mapping ... */
+
+ if (read(syncfd, &s, sizeof(s)) != sizeof(s))
+ bail("failed to sync with parent: read(SYNC_USERMAP_ACK)");
+ if (s != SYNC_USERMAP_ACK)
+ bail("failed to sync with parent: SYNC_USERMAP_ACK: got %u", s);
+ /* Switching is only necessary if we joined namespaces. */
+ if (config.namespaces) {
+ if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0)
+					bail("failed to set process as non-dumpable");
+ }
+ }
+
+ /*
+ * TODO: What about non-namespace clone flags that we're dropping here?
+ *
+		 * We fork again because of the PID namespace: setns(2) and unshare(2)
+		 * don't change the PID namespace of the calling process, since doing so
+		 * would change the caller's idea of its own PID (as reported by
+		 * getpid()), which would break many applications and libraries. So we
+		 * must fork to actually enter the new PID namespace.
+ */
+ child = clone_parent(&env, JUMP_INIT);
+ if (child < 0)
+ bail("unable to fork: init_func");
+
+ /* Send the child to our parent, which knows what it's doing. */
+ s = SYNC_RECVPID_PLS;
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
+ kill(child, SIGKILL);
+ bail("failed to sync with parent: write(SYNC_RECVPID_PLS)");
+ }
+ if (write(syncfd, &child, sizeof(child)) != sizeof(child)) {
+ kill(child, SIGKILL);
+ bail("failed to sync with parent: write(childpid)");
+ }
+
+ /* ... wait for parent to get the pid ... */
+
+ if (read(syncfd, &s, sizeof(s)) != sizeof(s)) {
+ kill(child, SIGKILL);
+ bail("failed to sync with parent: read(SYNC_RECVPID_ACK)");
+ }
+ if (s != SYNC_RECVPID_ACK) {
+ kill(child, SIGKILL);
+ bail("failed to sync with parent: SYNC_RECVPID_ACK: got %u", s);
+ }
+
+ s = SYNC_CHILD_READY;
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
+ kill(child, SIGKILL);
+ bail("failed to sync with parent: write(SYNC_CHILD_READY)");
+ }
+
+ /* Our work is done. [Stage 2: JUMP_INIT] is doing the rest of the work. */
+ exit(0);
+ }
+
+ /*
+ * Stage 2: We're the final child process, and the only process that will
+ * actually return to the Go runtime. Our job is to just do the
+ * final cleanup steps and then return to the Go runtime to allow
+ * init_linux.go to run.
+ */
+ case JUMP_INIT: {
+ /*
+ * We're inside the child now, having jumped from the
+ * start_child() code after forking in the parent.
+ */
+ enum sync_t s;
+
+ /* We're in a child and thus need to tell the parent if we die. */
+ syncfd = sync_grandchild_pipe[0];
+ close(sync_grandchild_pipe[1]);
+ close(sync_child_pipe[0]);
+ close(sync_child_pipe[1]);
+
+ /* For debugging. */
+ prctl(PR_SET_NAME, (unsigned long) "runc:[2:INIT]", 0, 0, 0);
+
+ if (read(syncfd, &s, sizeof(s)) != sizeof(s))
+ bail("failed to sync with parent: read(SYNC_GRANDCHILD)");
+ if (s != SYNC_GRANDCHILD)
+ bail("failed to sync with parent: SYNC_GRANDCHILD: got %u", s);
+
+ if (setsid() < 0)
+ bail("setsid failed");
+
+ if (setuid(0) < 0)
+ bail("setuid failed");
+
+ if (setgid(0) < 0)
+ bail("setgid failed");
+
+ if (!config.is_rootless && config.is_setgroup) {
+ if (setgroups(0, NULL) < 0)
+ bail("setgroups failed");
+ }
+
+ s = SYNC_CHILD_READY;
+ if (write(syncfd, &s, sizeof(s)) != sizeof(s))
+			bail("failed to sync with parent: write(SYNC_CHILD_READY)");
+
+ /* Close sync pipes. */
+ close(sync_grandchild_pipe[0]);
+
+ /* Free netlink data. */
+ nl_free(&config);
+
+ /* Finish executing, let the Go runtime take over. */
+ return;
+ }
+ default:
+ bail("unexpected jump value");
+ }
+
+ /* Should never be reached. */
+ bail("should never be reached");
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process.go b/vendor/github.com/opencontainers/runc/libcontainer/process.go
new file mode 100644
index 000000000..f1ad08149
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/process.go
@@ -0,0 +1,106 @@
+package libcontainer
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type processOperations interface {
+ wait() (*os.ProcessState, error)
+ signal(sig os.Signal) error
+ pid() int
+}
+
+// Process specifies the configuration and IO for a process inside
+// a container.
+type Process struct {
+ // The command to be run followed by any arguments.
+ Args []string
+
+ // Env specifies the environment variables for the process.
+ Env []string
+
+ // User will set the uid and gid of the executing process running inside the container
+ // local to the container's user and group configuration.
+ User string
+
+ // AdditionalGroups specifies the gids that should be added to supplementary groups
+ // in addition to those that the user belongs to.
+ AdditionalGroups []string
+
+ // Cwd will change the processes current working directory inside the container's rootfs.
+ Cwd string
+
+ // Stdin is a pointer to a reader which provides the standard input stream.
+ Stdin io.Reader
+
+ // Stdout is a pointer to a writer which receives the standard output stream.
+ Stdout io.Writer
+
+ // Stderr is a pointer to a writer which receives the standard error stream.
+ Stderr io.Writer
+
+ // ExtraFiles specifies additional open files to be inherited by the container
+ ExtraFiles []*os.File
+
+ // Capabilities specify the capabilities to keep when executing the process inside the container
+ // All capabilities not specified will be dropped from the processes capability mask
+ Capabilities *configs.Capabilities
+
+ // AppArmorProfile specifies the profile to apply to the process and is
+ // changed at the time the process is execed
+ AppArmorProfile string
+
+ // Label specifies the label to apply to the process. It is commonly used by selinux
+ Label string
+
+ // NoNewPrivileges controls whether processes can gain additional privileges.
+ NoNewPrivileges *bool
+
+ // Rlimits specifies the resource limits, such as max open files, to set in the container
+ // If Rlimits are not set, the container will inherit rlimits from the parent process
+ Rlimits []configs.Rlimit
+
+ // ConsoleSocket provides the masterfd console.
+ ConsoleSocket *os.File
+
+ ops processOperations
+}
+
+// Wait waits for the process to exit and releases any resources associated
+// with the Process.
+func (p Process) Wait() (*os.ProcessState, error) {
+ if p.ops == nil {
+ return nil, newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
+ }
+ return p.ops.wait()
+}
+
+// Pid returns the process ID
+func (p Process) Pid() (int, error) {
+	// math.MinInt32 is returned here because it's an invalid value
+	// for the kill() system call.
+ if p.ops == nil {
+ return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
+ }
+ return p.ops.pid(), nil
+}
+
+// Signal sends a signal to the Process.
+func (p Process) Signal(sig os.Signal) error {
+ if p.ops == nil {
+ return newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
+ }
+ return p.ops.signal(sig)
+}
+
+// IO holds the process's STDIO
+type IO struct {
+ Stdin io.WriteCloser
+ Stdout io.ReadCloser
+ Stderr io.ReadCloser
+}
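+
+// Example (illustrative): a minimal Process for an interactive shell, as it
+// might be passed to a Container's Run or Start method.
+//
+//	process := &libcontainer.Process{
+//		Args:   []string{"/bin/sh"},
+//		Env:    []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/bin:/bin"},
+//		Stdin:  os.Stdin,
+//		Stdout: os.Stdout,
+//		Stderr: os.Stderr,
+//	}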
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go
new file mode 100644
index 000000000..171685ccd
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go
@@ -0,0 +1,493 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "syscall" // only for Signal
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/runc/libcontainer/utils"
+
+ "golang.org/x/sys/unix"
+)
+
+type parentProcess interface {
+ // pid returns the pid for the running process.
+ pid() int
+
+ // start starts the process execution.
+ start() error
+
+	// terminate sends a SIGKILL to the process and waits for it to exit.
+ terminate() error
+
+ // wait waits on the process returning the process state.
+ wait() (*os.ProcessState, error)
+
+ // startTime returns the process start time.
+ startTime() (uint64, error)
+
+ signal(os.Signal) error
+
+ externalDescriptors() []string
+
+ setExternalDescriptors(fds []string)
+}
+
+type setnsProcess struct {
+ cmd *exec.Cmd
+ parentPipe *os.File
+ childPipe *os.File
+ cgroupPaths map[string]string
+ config *initConfig
+ fds []string
+ process *Process
+ bootstrapData io.Reader
+}
+
+func (p *setnsProcess) startTime() (uint64, error) {
+ stat, err := system.Stat(p.pid())
+ return stat.StartTime, err
+}
+
+func (p *setnsProcess) signal(sig os.Signal) error {
+ s, ok := sig.(syscall.Signal)
+ if !ok {
+ return errors.New("os: unsupported signal type")
+ }
+ return unix.Kill(p.pid(), s)
+}
+
+func (p *setnsProcess) start() (err error) {
+ defer p.parentPipe.Close()
+ err = p.cmd.Start()
+ p.childPipe.Close()
+ if err != nil {
+ return newSystemErrorWithCause(err, "starting setns process")
+ }
+ if p.bootstrapData != nil {
+ if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
+ return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
+ }
+ }
+ if err = p.execSetns(); err != nil {
+ return newSystemErrorWithCause(err, "executing setns process")
+ }
+ // We can't join cgroups if we're in a rootless container.
+ if !p.config.Rootless && len(p.cgroupPaths) > 0 {
+ if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
+ return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid())
+ }
+ }
+	// Set rlimits; this has to be done here because we lose permissions
+	// to raise the limits once we enter a user namespace.
+ if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
+ return newSystemErrorWithCause(err, "setting rlimits for process")
+ }
+ if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
+ return newSystemErrorWithCause(err, "writing config to pipe")
+ }
+
+ ierr := parseSync(p.parentPipe, func(sync *syncT) error {
+ switch sync.Type {
+ case procReady:
+ // This shouldn't happen.
+ panic("unexpected procReady in setns")
+ case procHooks:
+ // This shouldn't happen.
+ panic("unexpected procHooks in setns")
+ default:
+ return newSystemError(fmt.Errorf("invalid JSON payload from child"))
+ }
+ })
+
+ if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
+ return newSystemErrorWithCause(err, "calling shutdown on init pipe")
+ }
+ // Must be done after Shutdown so the child will exit and we can wait for it.
+ if ierr != nil {
+ p.wait()
+ return ierr
+ }
+ return nil
+}
+
+// execSetns runs the process that executes C code to perform the setns calls.
+// Because setns support requires the C process to fork off a child and perform
+// the setns before the Go runtime boots, we wait on the process to die and
+// receive the child's pid over the provided pipe.
+func (p *setnsProcess) execSetns() error {
+ status, err := p.cmd.Process.Wait()
+ if err != nil {
+ p.cmd.Wait()
+ return newSystemErrorWithCause(err, "waiting on setns process to finish")
+ }
+ if !status.Success() {
+ p.cmd.Wait()
+ return newSystemError(&exec.ExitError{ProcessState: status})
+ }
+ var pid *pid
+ if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
+ p.cmd.Wait()
+ return newSystemErrorWithCause(err, "reading pid from init pipe")
+ }
+ process, err := os.FindProcess(pid.Pid)
+ if err != nil {
+ return err
+ }
+ p.cmd.Process = process
+ p.process.ops = p
+ return nil
+}
+
+// terminate sends a SIGKILL to the forked process for the setns routine then waits to
+// avoid the process becoming a zombie.
+func (p *setnsProcess) terminate() error {
+ if p.cmd.Process == nil {
+ return nil
+ }
+ err := p.cmd.Process.Kill()
+ if _, werr := p.wait(); err == nil {
+ err = werr
+ }
+ return err
+}
+
+func (p *setnsProcess) wait() (*os.ProcessState, error) {
+ err := p.cmd.Wait()
+
+ // Return actual ProcessState even on Wait error
+ return p.cmd.ProcessState, err
+}
+
+func (p *setnsProcess) pid() int {
+ return p.cmd.Process.Pid
+}
+
+func (p *setnsProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+func (p *setnsProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
+
+type initProcess struct {
+ cmd *exec.Cmd
+ parentPipe *os.File
+ childPipe *os.File
+ config *initConfig
+ manager cgroups.Manager
+ container *linuxContainer
+ fds []string
+ process *Process
+ bootstrapData io.Reader
+ sharePidns bool
+ rootDir *os.File
+}
+
+func (p *initProcess) pid() int {
+ return p.cmd.Process.Pid
+}
+
+func (p *initProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+// execSetns runs the process that executes C code to perform the setns calls.
+// Because setns support requires the C process to fork off a child and perform
+// the setns before the Go runtime boots, we wait on the process to die and
+// receive the child's pid over the provided pipe.
+// This is called by the initProcess.start function.
+func (p *initProcess) execSetns() error {
+ status, err := p.cmd.Process.Wait()
+ if err != nil {
+ p.cmd.Wait()
+ return err
+ }
+ if !status.Success() {
+ p.cmd.Wait()
+ return &exec.ExitError{ProcessState: status}
+ }
+ var pid *pid
+ if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
+ p.cmd.Wait()
+ return err
+ }
+ process, err := os.FindProcess(pid.Pid)
+ if err != nil {
+ return err
+ }
+ p.cmd.Process = process
+ p.process.ops = p
+ return nil
+}
+
+func (p *initProcess) start() error {
+ defer p.parentPipe.Close()
+ err := p.cmd.Start()
+ p.process.ops = p
+ p.childPipe.Close()
+ p.rootDir.Close()
+ if err != nil {
+ p.process.ops = nil
+ return newSystemErrorWithCause(err, "starting init process command")
+ }
+ if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
+ return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
+ }
+ if err := p.execSetns(); err != nil {
+ return newSystemErrorWithCause(err, "running exec setns process for init")
+ }
+ // Save the standard descriptor names before the container process
+ // can potentially move them (e.g., via dup2()). If we don't do this now,
+ // we won't know at checkpoint time which file descriptor to look up.
+ fds, err := getPipeFds(p.pid())
+ if err != nil {
+ return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid())
+ }
+ p.setExternalDescriptors(fds)
+ // Do this before syncing with child so that no children can escape the
+ // cgroup. We don't need to worry about not doing this and not being root
+ // because we'd be using the rootless cgroup manager in that case.
+ if err := p.manager.Apply(p.pid()); err != nil {
+ return newSystemErrorWithCause(err, "applying cgroup configuration for process")
+ }
+ defer func() {
+ if err != nil {
+			// TODO: it should not be this function's responsibility to call Destroy here
+ p.manager.Destroy()
+ }
+ }()
+ if err := p.createNetworkInterfaces(); err != nil {
+ return newSystemErrorWithCause(err, "creating network interfaces")
+ }
+ if err := p.sendConfig(); err != nil {
+ return newSystemErrorWithCause(err, "sending config to init process")
+ }
+ var (
+ sentRun bool
+ sentResume bool
+ )
+
+ ierr := parseSync(p.parentPipe, func(sync *syncT) error {
+ switch sync.Type {
+ case procReady:
+			// Set rlimits; this has to be done here because we lose permissions
+			// to raise the limits once we enter a user namespace.
+ if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
+ return newSystemErrorWithCause(err, "setting rlimits for ready process")
+ }
+ // call prestart hooks
+ if !p.config.Config.Namespaces.Contains(configs.NEWNS) {
+ // Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
+ if err := p.manager.Set(p.config.Config); err != nil {
+ return newSystemErrorWithCause(err, "setting cgroup config for ready process")
+ }
+
+ if p.config.Config.Hooks != nil {
+ s := configs.HookState{
+ Version: p.container.config.Version,
+ ID: p.container.id,
+ Pid: p.pid(),
+ Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"),
+ }
+ for i, hook := range p.config.Config.Hooks.Prestart {
+ if err := hook.Run(s); err != nil {
+ return newSystemErrorWithCausef(err, "running prestart hook %d", i)
+ }
+ }
+ }
+ }
+ // Sync with child.
+ if err := writeSync(p.parentPipe, procRun); err != nil {
+ return newSystemErrorWithCause(err, "writing syncT 'run'")
+ }
+ sentRun = true
+ case procHooks:
+ // Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
+ if err := p.manager.Set(p.config.Config); err != nil {
+ return newSystemErrorWithCause(err, "setting cgroup config for procHooks process")
+ }
+ if p.config.Config.Hooks != nil {
+ s := configs.HookState{
+ Version: p.container.config.Version,
+ ID: p.container.id,
+ Pid: p.pid(),
+ Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"),
+ }
+ for i, hook := range p.config.Config.Hooks.Prestart {
+ if err := hook.Run(s); err != nil {
+ return newSystemErrorWithCausef(err, "running prestart hook %d", i)
+ }
+ }
+ }
+ // Sync with child.
+ if err := writeSync(p.parentPipe, procResume); err != nil {
+ return newSystemErrorWithCause(err, "writing syncT 'resume'")
+ }
+ sentResume = true
+ default:
+ return newSystemError(fmt.Errorf("invalid JSON payload from child"))
+ }
+
+ return nil
+ })
+
+ if !sentRun {
+ return newSystemErrorWithCause(ierr, "container init")
+ }
+ if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume {
+		return newSystemError(fmt.Errorf("could not synchronise with container process after executing prestart hooks"))
+ }
+ if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
+ return newSystemErrorWithCause(err, "shutting down init pipe")
+ }
+
+ // Must be done after Shutdown so the child will exit and we can wait for it.
+ if ierr != nil {
+ p.wait()
+ return ierr
+ }
+ return nil
+}
+
+func (p *initProcess) wait() (*os.ProcessState, error) {
+ err := p.cmd.Wait()
+ if err != nil {
+ return p.cmd.ProcessState, err
+ }
+	// When sharing the host PID namespace, we should kill all processes in the
+	// cgroup once init has died.
+ if p.sharePidns {
+ signalAllProcesses(p.manager, unix.SIGKILL)
+ }
+ return p.cmd.ProcessState, nil
+}
+
+func (p *initProcess) terminate() error {
+ if p.cmd.Process == nil {
+ return nil
+ }
+ err := p.cmd.Process.Kill()
+ if _, werr := p.wait(); err == nil {
+ err = werr
+ }
+ return err
+}
+
+func (p *initProcess) startTime() (uint64, error) {
+ stat, err := system.Stat(p.pid())
+ return stat.StartTime, err
+}
+
+func (p *initProcess) sendConfig() error {
+	// Send the config to the container's init process. We don't use json.Encoder
+	// here because the JSON decoder can misbehave in some cases; see:
+ // https://github.com/docker/docker/issues/14203#issuecomment-174177790
+ return utils.WriteJSON(p.parentPipe, p.config)
+}
+
+func (p *initProcess) createNetworkInterfaces() error {
+ for _, config := range p.config.Config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+ n := &network{
+ Network: *config,
+ }
+ if err := strategy.create(n, p.pid()); err != nil {
+ return err
+ }
+ p.config.Networks = append(p.config.Networks, n)
+ }
+ return nil
+}
+
+func (p *initProcess) signal(sig os.Signal) error {
+ s, ok := sig.(syscall.Signal)
+ if !ok {
+ return errors.New("os: unsupported signal type")
+ }
+ return unix.Kill(p.pid(), s)
+}
+
+func (p *initProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
+
+func getPipeFds(pid int) ([]string, error) {
+ fds := make([]string, 3)
+
+ dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd")
+ for i := 0; i < 3; i++ {
+ // XXX: This breaks if the path is not a valid symlink (which can
+ // happen in certain particularly unlucky mount namespace setups).
+ f := filepath.Join(dirPath, strconv.Itoa(i))
+ target, err := os.Readlink(f)
+ if err != nil {
+ // Ignore permission errors, for rootless containers and other
+			// non-dumpable processes. If we can't get the fd for a particular
+ // file, there's not much we can do.
+ if os.IsPermission(err) {
+ continue
+ }
+ return fds, err
+ }
+ fds[i] = target
+ }
+ return fds, nil
+}
+
+// InitializeIO creates pipes for use with the process's stdio and returns the
+// opposite side for each. Do not use this if you want to have a pseudoterminal
+// set up for you by libcontainer (TODO: fix that too).
+// TODO: This is mostly unnecessary, and should be handled by clients.
+func (p *Process) InitializeIO(rootuid, rootgid int) (i *IO, err error) {
+ var fds []uintptr
+ i = &IO{}
+ // cleanup in case of an error
+ defer func() {
+ if err != nil {
+ for _, fd := range fds {
+ unix.Close(int(fd))
+ }
+ }
+ }()
+ // STDIN
+ r, w, err := os.Pipe()
+ if err != nil {
+ return nil, err
+ }
+ fds = append(fds, r.Fd(), w.Fd())
+ p.Stdin, i.Stdin = r, w
+ // STDOUT
+ if r, w, err = os.Pipe(); err != nil {
+ return nil, err
+ }
+ fds = append(fds, r.Fd(), w.Fd())
+ p.Stdout, i.Stdout = w, r
+ // STDERR
+ if r, w, err = os.Pipe(); err != nil {
+ return nil, err
+ }
+ fds = append(fds, r.Fd(), w.Fd())
+ p.Stderr, i.Stderr = w, r
+	// change ownership of the pipes in case we are in a user namespace
+ for _, fd := range fds {
+ if err := unix.Fchown(int(fd), rootuid, rootgid); err != nil {
+ return nil, err
+ }
+ }
+ return i, nil
+}
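+
+// Usage sketch (illustrative): relay the returned pipe ends to the caller's
+// stdio.
+//
+//	pio, err := process.InitializeIO(0, 0) // root uid/gid inside the container
+//	if err != nil {
+//		return err
+//	}
+//	go io.Copy(pio.Stdin, os.Stdin)
+//	go io.Copy(os.Stdout, pio.Stdout)
+//	go io.Copy(os.Stderr, pio.Stderr)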
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go b/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go
new file mode 100644
index 000000000..408916ad9
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go
@@ -0,0 +1,122 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+func newRestoredProcess(pid int, fds []string) (*restoredProcess, error) {
+ proc, err := os.FindProcess(pid)
+ if err != nil {
+ return nil, err
+ }
+ stat, err := system.Stat(pid)
+ if err != nil {
+ return nil, err
+ }
+ return &restoredProcess{
+ proc: proc,
+ processStartTime: stat.StartTime,
+ fds: fds,
+ }, nil
+}
+
+type restoredProcess struct {
+ proc *os.Process
+ processStartTime uint64
+ fds []string
+}
+
+func (p *restoredProcess) start() error {
+ return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError)
+}
+
+func (p *restoredProcess) pid() int {
+ return p.proc.Pid
+}
+
+func (p *restoredProcess) terminate() error {
+ err := p.proc.Kill()
+ if _, werr := p.wait(); err == nil {
+ err = werr
+ }
+ return err
+}
+
+func (p *restoredProcess) wait() (*os.ProcessState, error) {
+ // TODO: how do we wait on the actual process?
+ // maybe use --exec-cmd in criu
+ st, err := p.proc.Wait()
+ if err != nil {
+ return nil, err
+ }
+ return st, nil
+}
+
+func (p *restoredProcess) startTime() (uint64, error) {
+ return p.processStartTime, nil
+}
+
+func (p *restoredProcess) signal(s os.Signal) error {
+ return p.proc.Signal(s)
+}
+
+func (p *restoredProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+func (p *restoredProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
+
+// nonChildProcess represents a process where the calling process is not
+// the parent process. This process is created when a factory loads a container from
+// a persisted state.
+type nonChildProcess struct {
+ processPid int
+ processStartTime uint64
+ fds []string
+}
+
+func (p *nonChildProcess) start() error {
+ return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError)
+}
+
+func (p *nonChildProcess) pid() int {
+ return p.processPid
+}
+
+func (p *nonChildProcess) terminate() error {
+ return newGenericError(fmt.Errorf("restored process cannot be terminated"), SystemError)
+}
+
+func (p *nonChildProcess) wait() (*os.ProcessState, error) {
+ return nil, newGenericError(fmt.Errorf("restored process cannot be waited on"), SystemError)
+}
+
+func (p *nonChildProcess) startTime() (uint64, error) {
+ return p.processStartTime, nil
+}
+
+func (p *nonChildProcess) signal(s os.Signal) error {
+ proc, err := os.FindProcess(p.processPid)
+ if err != nil {
+ return err
+ }
+ return proc.Signal(s)
+}
+
+func (p *nonChildProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+func (p *nonChildProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go
new file mode 100644
index 000000000..e2e734a85
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go
@@ -0,0 +1,812 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/symlink"
+ "github.com/mrunalp/fileutils"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+ "github.com/opencontainers/selinux/go-selinux/label"
+
+ "golang.org/x/sys/unix"
+)
+
+const defaultMountFlags = unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
+
+// needsSetupDev returns true if /dev needs to be set up.
+func needsSetupDev(config *configs.Config) bool {
+ for _, m := range config.Mounts {
+ if m.Device == "bind" && libcontainerUtils.CleanPath(m.Destination) == "/dev" {
+ return false
+ }
+ }
+ return true
+}
+
+// prepareRootfs sets up the devices, mount points, and filesystems for use
+// inside a new mount namespace. It doesn't set anything as ro. You must call
+// finalizeRootfs after this function to finish setting up the rootfs.
+func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) {
+ if err := prepareRoot(config); err != nil {
+ return newSystemErrorWithCause(err, "preparing rootfs")
+ }
+
+ setupDev := needsSetupDev(config)
+ for _, m := range config.Mounts {
+ for _, precmd := range m.PremountCmds {
+ if err := mountCmd(precmd); err != nil {
+ return newSystemErrorWithCause(err, "running premount command")
+ }
+ }
+
+ if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil {
+ return newSystemErrorWithCausef(err, "mounting %q to rootfs %q at %q", m.Source, config.Rootfs, m.Destination)
+ }
+
+ for _, postcmd := range m.PostmountCmds {
+ if err := mountCmd(postcmd); err != nil {
+ return newSystemErrorWithCause(err, "running postmount command")
+ }
+ }
+ }
+
+ if setupDev {
+ if err := createDevices(config); err != nil {
+ return newSystemErrorWithCause(err, "creating device nodes")
+ }
+ if err := setupPtmx(config); err != nil {
+ return newSystemErrorWithCause(err, "setting up ptmx")
+ }
+ if err := setupDevSymlinks(config.Rootfs); err != nil {
+ return newSystemErrorWithCause(err, "setting up /dev symlinks")
+ }
+ }
+
+ // Signal the parent to run the pre-start hooks.
+ // The hooks are run after the mounts are setup, but before we switch to the new
+ // root, so that the old root is still available in the hooks for any mount
+ // manipulations.
+ if err := syncParentHooks(pipe); err != nil {
+ return err
+ }
+
+ // The reason these operations are done here rather than in finalizeRootfs
+ // is because the console-handling code gets quite sticky if we have to set
+ // up the console before doing the pivot_root(2). This is because the
+ // Console API has to also work with the ExecIn case, which means that the
+ // API must be able to deal with being inside as well as outside the
+ // container. It's just cleaner to do this here (at the expense of the
+ // operation not being perfectly split).
+
+ if err := unix.Chdir(config.Rootfs); err != nil {
+ return newSystemErrorWithCausef(err, "changing dir to %q", config.Rootfs)
+ }
+
+ if config.NoPivotRoot {
+ err = msMoveRoot(config.Rootfs)
+ } else {
+ err = pivotRoot(config.Rootfs)
+ }
+ if err != nil {
+ return newSystemErrorWithCause(err, "jailing process inside rootfs")
+ }
+
+ if setupDev {
+ if err := reOpenDevNull(); err != nil {
+ return newSystemErrorWithCause(err, "reopening /dev/null inside container")
+ }
+ }
+
+ return nil
+}
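+
+// The intended sequence (illustrative): prepareRootfs runs inside the new
+// mount namespace, the remaining init steps (console setup, etc.) happen,
+// and finalizeRootfs below then applies the read-only flags:
+//
+//	if err := prepareRootfs(pipe, config); err != nil { ... }
+//	// ... remaining init steps ...
+//	if err := finalizeRootfs(config); err != nil { ... }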
+
+// finalizeRootfs remounts anything that needs to be read-only. You must call
+// prepareRootfs first.
+func finalizeRootfs(config *configs.Config) (err error) {
+ // remount dev as ro if specified
+ for _, m := range config.Mounts {
+ if libcontainerUtils.CleanPath(m.Destination) == "/dev" {
+ if m.Flags&unix.MS_RDONLY == unix.MS_RDONLY {
+ if err := remountReadonly(m); err != nil {
+ return newSystemErrorWithCausef(err, "remounting %q as readonly", m.Destination)
+ }
+ }
+ break
+ }
+ }
+
+ // set rootfs ( / ) as readonly
+ if config.Readonlyfs {
+ if err := setReadonly(); err != nil {
+ return newSystemErrorWithCause(err, "setting rootfs as readonly")
+ }
+ }
+
+ unix.Umask(0022)
+ return nil
+}
+
+func mountCmd(cmd configs.Command) error {
+ command := exec.Command(cmd.Path, cmd.Args[:]...)
+ command.Env = cmd.Env
+ command.Dir = cmd.Dir
+ if out, err := command.CombinedOutput(); err != nil {
+ return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err)
+ }
+ return nil
+}
+
+func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
+ var (
+ dest = m.Destination
+ )
+ if !strings.HasPrefix(dest, rootfs) {
+ dest = filepath.Join(rootfs, dest)
+ }
+
+ switch m.Device {
+ case "proc", "sysfs":
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+		// SELinux kernels do not support labeling of /proc or /sys
+ return mountPropagate(m, rootfs, "")
+ case "mqueue":
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ if err := mountPropagate(m, rootfs, mountLabel); err != nil {
+ // older kernels do not support labeling of /dev/mqueue
+ if err := mountPropagate(m, rootfs, ""); err != nil {
+ return err
+ }
+ return label.SetFileLabel(dest, mountLabel)
+ }
+ return nil
+ case "tmpfs":
+ copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP
+ tmpDir := ""
+ stat, err := os.Stat(dest)
+ if err != nil {
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ }
+ if copyUp {
+ tmpDir, err = ioutil.TempDir("/tmp", "runctmpdir")
+ if err != nil {
+ return newSystemErrorWithCause(err, "tmpcopyup: failed to create tmpdir")
+ }
+ defer os.RemoveAll(tmpDir)
+ m.Destination = tmpDir
+ }
+ if err := mountPropagate(m, rootfs, mountLabel); err != nil {
+ return err
+ }
+ if copyUp {
+ if err := fileutils.CopyDirectory(dest, tmpDir); err != nil {
+ errMsg := fmt.Errorf("tmpcopyup: failed to copy %s to %s: %v", dest, tmpDir, err)
+ if err1 := unix.Unmount(tmpDir, unix.MNT_DETACH); err1 != nil {
+ return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg)
+ }
+ return errMsg
+ }
+ if err := unix.Mount(tmpDir, dest, "", unix.MS_MOVE, ""); err != nil {
+ errMsg := fmt.Errorf("tmpcopyup: failed to move mount %s to %s: %v", tmpDir, dest, err)
+ if err1 := unix.Unmount(tmpDir, unix.MNT_DETACH); err1 != nil {
+ return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg)
+ }
+ return errMsg
+ }
+ }
+ if stat != nil {
+ if err = os.Chmod(dest, stat.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+ case "bind":
+ stat, err := os.Stat(m.Source)
+ if err != nil {
+ // error out if the source of a bind mount does not exist as we will be
+ // unable to bind anything to it.
+ return err
+ }
+ // ensure the destination of the bind mount has its symlinks resolved at
+ // mount time, because earlier mounts can invalidate a later mount's
+ // destination: a user can nest mounts inside other mounts in an attempt
+ // to break out of the container's rootfs.
+ if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil {
+ return err
+ }
+ if err := checkMountDestination(rootfs, dest); err != nil {
+ return err
+ }
+ // update the mount with the correct dest after symlinks are resolved.
+ m.Destination = dest
+ if err := createIfNotExists(dest, stat.IsDir()); err != nil {
+ return err
+ }
+ if err := mountPropagate(m, rootfs, mountLabel); err != nil {
+ return err
+ }
+ // A bind mount does not change mount options, so a remount is needed to
+ // make the requested options effective. First check that non-default
+ // options are actually set before attempting the remount.
+ if m.Flags&^(unix.MS_REC|unix.MS_REMOUNT|unix.MS_BIND) != 0 {
+ // only remount if unique mount options are set
+ if err := remount(m, rootfs); err != nil {
+ return err
+ }
+ }
+
+ if m.Relabel != "" {
+ if err := label.Validate(m.Relabel); err != nil {
+ return err
+ }
+ shared := label.IsShared(m.Relabel)
+ if err := label.Relabel(m.Source, mountLabel, shared); err != nil {
+ return err
+ }
+ }
+ case "cgroup":
+ binds, err := getCgroupMounts(m)
+ if err != nil {
+ return err
+ }
+ var merged []string
+ for _, b := range binds {
+ ss := filepath.Base(b.Destination)
+ if strings.Contains(ss, ",") {
+ merged = append(merged, ss)
+ }
+ }
+ tmpfs := &configs.Mount{
+ Source: "tmpfs",
+ Device: "tmpfs",
+ Destination: m.Destination,
+ Flags: defaultMountFlags,
+ Data: "mode=755",
+ PropagationFlags: m.PropagationFlags,
+ }
+ if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil {
+ return err
+ }
+ for _, b := range binds {
+ if err := mountToRootfs(b, rootfs, mountLabel); err != nil {
+ return err
+ }
+ }
+ for _, mc := range merged {
+ for _, ss := range strings.Split(mc, ",") {
+ // symlink(2) stores the target path verbatim: it performs no checks
+ // and no relative-path conversion. Also, don't error out if the
+ // cgroup symlink already exists.
+ if err := os.Symlink(mc, filepath.Join(rootfs, m.Destination, ss)); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ }
+ if m.Flags&unix.MS_RDONLY != 0 {
+ // remount cgroup root as readonly
+ mcgrouproot := &configs.Mount{
+ Source: m.Destination,
+ Device: "bind",
+ Destination: m.Destination,
+ Flags: defaultMountFlags | unix.MS_RDONLY | unix.MS_BIND,
+ }
+ if err := remount(mcgrouproot, rootfs); err != nil {
+ return err
+ }
+ }
+ default:
+ // ensure the destination of the mount has its symlinks resolved at mount
+ // time, because earlier mounts can invalidate a later mount's
+ // destination: a user can nest mounts inside other mounts in an attempt
+ // to break out of the container's rootfs.
+ var err error
+ if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil {
+ return err
+ }
+ if err := checkMountDestination(rootfs, dest); err != nil {
+ return err
+ }
+ // update the mount with the correct dest after symlinks are resolved.
+ m.Destination = dest
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ return mountPropagate(m, rootfs, mountLabel)
+ }
+ return nil
+}
+
+func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) {
+ mounts, err := cgroups.GetCgroupMounts(false)
+ if err != nil {
+ return nil, err
+ }
+
+ cgroupPaths, err := cgroups.ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return nil, err
+ }
+
+ var binds []*configs.Mount
+
+ for _, mm := range mounts {
+ dir, err := mm.GetOwnCgroup(cgroupPaths)
+ if err != nil {
+ return nil, err
+ }
+ relDir, err := filepath.Rel(mm.Root, dir)
+ if err != nil {
+ return nil, err
+ }
+ binds = append(binds, &configs.Mount{
+ Device: "bind",
+ Source: filepath.Join(mm.Mountpoint, relDir),
+ Destination: filepath.Join(m.Destination, filepath.Base(mm.Mountpoint)),
+ Flags: unix.MS_BIND | unix.MS_REC | m.Flags,
+ PropagationFlags: m.PropagationFlags,
+ })
+ }
+
+ return binds, nil
+}
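+
+// For example, for a process in the "cpu,cpuacct" hierarchy at /foo, this
+// produces a bind of /sys/fs/cgroup/cpu,cpuacct/foo onto
+// <m.Destination>/cpu,cpuacct inside the container (a sketch; exact paths
+// depend on the host's cgroup layout).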
+
+// checkMountDestination checks to ensure that the mount destination is not over the top of /proc.
+// dest is required to be an abs path and have any symlinks resolved before calling this function.
+func checkMountDestination(rootfs, dest string) error {
+ invalidDestinations := []string{
+ "/proc",
+ }
+ // Whitelist: subdirectories of the invalid destinations that are allowed.
+ validDestinations := []string{
+ // These entries can be bind mounted from files emulated by FUSE,
+ // so that commands like top and free display stats for the container.
+ "/proc/cpuinfo",
+ "/proc/diskstats",
+ "/proc/meminfo",
+ "/proc/stat",
+ "/proc/swaps",
+ "/proc/uptime",
+ "/proc/net/dev",
+ }
+ for _, valid := range validDestinations {
+ path, err := filepath.Rel(filepath.Join(rootfs, valid), dest)
+ if err != nil {
+ return err
+ }
+ if path == "." {
+ return nil
+ }
+ }
+ for _, invalid := range invalidDestinations {
+ path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest)
+ if err != nil {
+ return err
+ }
+ if path == "." || !strings.HasPrefix(path, "..") {
+ return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid)
+ }
+ }
+ return nil
+}
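+
+// For example, with rootfs "/rootfs", a dest of "/rootfs/proc/sys" is
+// rejected (it falls inside /proc), while "/rootfs/proc/cpuinfo" passes via
+// the whitelist and "/rootfs/etc" is outside /proc entirely.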
+
+func setupDevSymlinks(rootfs string) error {
+ var links = [][2]string{
+ {"/proc/self/fd", "/dev/fd"},
+ {"/proc/self/fd/0", "/dev/stdin"},
+ {"/proc/self/fd/1", "/dev/stdout"},
+ {"/proc/self/fd/2", "/dev/stderr"},
+ }
+ // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink
+ // in /dev if it exists in /proc.
+ if _, err := os.Stat("/proc/kcore"); err == nil {
+ links = append(links, [2]string{"/proc/kcore", "/dev/core"})
+ }
+ for _, link := range links {
+ var (
+ src = link[0]
+ dst = filepath.Join(rootfs, link[1])
+ )
+ if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) {
+ return fmt.Errorf("symlink %s %s %s", src, dst, err)
+ }
+ }
+ return nil
+}
+
+// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs
+// this method will make them point to `/dev/null` in this container's rootfs. This
+// needs to be called after we chroot/pivot into the container's rootfs so that any
+// symlinks are resolved locally.
+func reOpenDevNull() error {
+ var stat, devNullStat unix.Stat_t
+ file, err := os.OpenFile("/dev/null", os.O_RDWR, 0)
+ if err != nil {
+ return fmt.Errorf("Failed to open /dev/null - %s", err)
+ }
+ defer file.Close()
+ if err := unix.Fstat(int(file.Fd()), &devNullStat); err != nil {
+ return err
+ }
+ for fd := 0; fd < 3; fd++ {
+ if err := unix.Fstat(fd, &stat); err != nil {
+ return err
+ }
+ if stat.Rdev == devNullStat.Rdev {
+ // Redirect the fd to our newly opened /dev/null (dup3 closes fd first).
+ if err := unix.Dup3(int(file.Fd()), fd, 0); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Create the device nodes in the container.
+func createDevices(config *configs.Config) error {
+ useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER)
+ oldMask := unix.Umask(0000)
+ for _, node := range config.Devices {
+ // containers running in a user namespace are not allowed to mknod
+ // devices so we can just bind mount it from the host.
+ if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil {
+ unix.Umask(oldMask)
+ return err
+ }
+ }
+ unix.Umask(oldMask)
+ return nil
+}
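+
+// A typical entry (a sketch of the config, not a required value):
+//
+//   configs.Device{Type: 'c', Path: "/dev/null", Major: 1, Minor: 3, FileMode: 0666}
+//
+// becomes a mknod(2) of a character device, or a bind mount of the host's
+// node when running inside a user namespace.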
+
+func bindMountDeviceNode(dest string, node *configs.Device) error {
+ f, err := os.Create(dest)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ if f != nil {
+ f.Close()
+ }
+ return unix.Mount(node.Path, dest, "bind", unix.MS_BIND, "")
+}
+
+// Creates the device node in the rootfs of the container.
+func createDeviceNode(rootfs string, node *configs.Device, bind bool) error {
+ dest := filepath.Join(rootfs, node.Path)
+ if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
+ return err
+ }
+
+ if bind {
+ return bindMountDeviceNode(dest, node)
+ }
+ if err := mknodDevice(dest, node); err != nil {
+ if os.IsExist(err) {
+ return nil
+ } else if os.IsPermission(err) {
+ return bindMountDeviceNode(dest, node)
+ }
+ return err
+ }
+ return nil
+}
+
+func mknodDevice(dest string, node *configs.Device) error {
+ fileMode := node.FileMode
+ switch node.Type {
+ case 'c', 'u':
+ fileMode |= unix.S_IFCHR
+ case 'b':
+ fileMode |= unix.S_IFBLK
+ case 'p':
+ fileMode |= unix.S_IFIFO
+ default:
+ return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
+ }
+ if err := unix.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil {
+ return err
+ }
+ return unix.Chown(dest, int(node.Uid), int(node.Gid))
+}
+
+func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info {
+ for _, m := range mountinfo {
+ if m.Mountpoint == dir {
+ return m
+ }
+ }
+ return nil
+}
+
+// getParentMount returns the mount point containing the given rootfs
+// directory, along with that mount's optional fields from mountinfo.
+func getParentMount(rootfs string) (string, string, error) {
+ var path string
+
+ mountinfos, err := mount.GetMounts()
+ if err != nil {
+ return "", "", err
+ }
+
+ mountinfo := getMountInfo(mountinfos, rootfs)
+ if mountinfo != nil {
+ return rootfs, mountinfo.Optional, nil
+ }
+
+ path = rootfs
+ for {
+ path = filepath.Dir(path)
+
+ mountinfo = getMountInfo(mountinfos, path)
+ if mountinfo != nil {
+ return path, mountinfo.Optional, nil
+ }
+
+ if path == "/" {
+ break
+ }
+ }
+
+ // If we are here, we did not find parent mount. Something is wrong.
+ return "", "", fmt.Errorf("Could not find parent mount of %s", rootfs)
+}
+
+// Make parent mount private if it was shared
+func rootfsParentMountPrivate(rootfs string) error {
+ sharedMount := false
+
+ parentMount, optionalOpts, err := getParentMount(rootfs)
+ if err != nil {
+ return err
+ }
+
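+ // Optional fields in /proc/self/mountinfo are space-separated tags such
+ // as "shared:42" or "master:12 shared:42" (see proc(5)); any "shared:"
+ // tag means the mount belongs to a shared peer group.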
+ optsSplit := strings.Split(optionalOpts, " ")
+ for _, opt := range optsSplit {
+ if strings.HasPrefix(opt, "shared:") {
+ sharedMount = true
+ break
+ }
+ }
+
+ // Make parent mount PRIVATE if it was shared. It is needed for two
+ // reasons. First of all pivot_root() will fail if parent mount is
+ // shared. Secondly when we bind mount rootfs it will propagate to
+ // parent namespace and we don't want that to happen.
+ if sharedMount {
+ return unix.Mount("", parentMount, "", unix.MS_PRIVATE, "")
+ }
+
+ return nil
+}
+
+func prepareRoot(config *configs.Config) error {
+ flag := unix.MS_SLAVE | unix.MS_REC
+ if config.RootPropagation != 0 {
+ flag = config.RootPropagation
+ }
+ if err := unix.Mount("", "/", "", uintptr(flag), ""); err != nil {
+ return err
+ }
+
+ // Make parent mount private to make sure following bind mount does
+ // not propagate in other namespaces. Also it will help with kernel
+ // check pass in pivot_root. (IS_SHARED(new_mnt->mnt_parent))
+ if err := rootfsParentMountPrivate(config.Rootfs); err != nil {
+ return err
+ }
+
+ return unix.Mount(config.Rootfs, config.Rootfs, "bind", unix.MS_BIND|unix.MS_REC, "")
+}
+
+func setReadonly() error {
+ return unix.Mount("/", "/", "bind", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_REC, "")
+}
+
+func setupPtmx(config *configs.Config) error {
+ ptmx := filepath.Join(config.Rootfs, "dev/ptmx")
+ if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err := os.Symlink("pts/ptmx", ptmx); err != nil {
+ return fmt.Errorf("symlink dev ptmx %s", err)
+ }
+ return nil
+}
+
+// pivotRoot will call pivot_root such that rootfs becomes the new root
+// filesystem, and everything else is cleaned up.
+func pivotRoot(rootfs string) error {
+ // While the documentation may claim otherwise, pivot_root(".", ".") is
+ // actually valid. What this results in is / being the new root but
+ // /proc/self/cwd being the old root. Since we can play around with the cwd
+ // with pivot_root this allows us to pivot without creating directories in
+ // the rootfs. Shout-outs to the LXC developers for giving us this idea.
+
+ oldroot, err := unix.Open("/", unix.O_DIRECTORY|unix.O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(oldroot)
+
+ newroot, err := unix.Open(rootfs, unix.O_DIRECTORY|unix.O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(newroot)
+
+ // Change to the new root so that the pivot_root actually acts on it.
+ if err := unix.Fchdir(newroot); err != nil {
+ return err
+ }
+
+ if err := unix.PivotRoot(".", "."); err != nil {
+ return fmt.Errorf("pivot_root %s", err)
+ }
+
+ // Currently our "." is oldroot (according to the current kernel code).
+ // However, purely for safety, we will fchdir(oldroot) since there isn't
+ // really any guarantee from the kernel what /proc/self/cwd will be after a
+ // pivot_root(2).
+
+ if err := unix.Fchdir(oldroot); err != nil {
+ return err
+ }
+
+ // Make oldroot rprivate to make sure our unmounts don't propagate to the
+ // host (and thus bork the machine).
+ if err := unix.Mount("", ".", "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
+ return err
+ }
+ // Perform the unmount. MNT_DETACH allows us to unmount /proc/self/cwd.
+ if err := unix.Unmount(".", unix.MNT_DETACH); err != nil {
+ return err
+ }
+
+ // Switch back to our shiny new root.
+ if err := unix.Chdir("/"); err != nil {
+ return fmt.Errorf("chdir / %s", err)
+ }
+ return nil
+}
+
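+// msMoveRoot is the fallback used when pivoting is disabled
+// (config.NoPivotRoot), e.g. when the rootfs lives on the initial ramfs
+// where pivot_root(2) fails: it moves the rootfs onto "/" with MS_MOVE and
+// then chroot(2)s into it.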
+func msMoveRoot(rootfs string) error {
+ if err := unix.Mount(rootfs, "/", "", unix.MS_MOVE, ""); err != nil {
+ return err
+ }
+ if err := unix.Chroot("."); err != nil {
+ return err
+ }
+ return unix.Chdir("/")
+}
+
+// createIfNotExists creates a file or a directory only if it does not already exist.
+func createIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
+
+// readonlyPath will make a path read only.
+func readonlyPath(path string) error {
+ if err := unix.Mount(path, path, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ return unix.Mount(path, path, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_REC, "")
+}
+
+// remountReadonly will remount an existing mount point and ensure that it is read-only.
+func remountReadonly(m *configs.Mount) error {
+ var (
+ dest = m.Destination
+ flags = m.Flags
+ )
+ for i := 0; i < 5; i++ {
+ if err := unix.Mount("", dest, "", uintptr(flags|unix.MS_REMOUNT|unix.MS_RDONLY), ""); err != nil {
+ switch err {
+ case unix.EBUSY:
+ time.Sleep(100 * time.Millisecond)
+ continue
+ default:
+ return err
+ }
+ }
+ return nil
+ }
+ return fmt.Errorf("unable to mount %s as readonly max retries reached", dest)
+}
+
+// maskPath masks the top of the specified path inside a container to avoid
+// security issues from processes reading information from non-namespace-aware
+// mounts (e.g. /proc/kcore).
+// For files, maskPath bind mounts /dev/null over the top of the specified path.
+// For directories, maskPath mounts read-only tmpfs over the top of the specified path.
+func maskPath(path string) error {
+ if err := unix.Mount("/dev/null", path, "", unix.MS_BIND, ""); err != nil && !os.IsNotExist(err) {
+ if err == unix.ENOTDIR {
+ return unix.Mount("tmpfs", path, "tmpfs", unix.MS_RDONLY, "")
+ }
+ return err
+ }
+ return nil
+}
+
+// writeSystemProperty writes the value to a path under /proc/sys as determined from the key.
+// E.g. net.ipv4.ip_forward is translated to /proc/sys/net/ipv4/ip_forward.
+func writeSystemProperty(key, value string) error {
+ keyPath := strings.Replace(key, ".", "/", -1)
+ return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644)
+}
+
+func remount(m *configs.Mount, rootfs string) error {
+ var (
+ dest = m.Destination
+ )
+ if !strings.HasPrefix(dest, rootfs) {
+ dest = filepath.Join(rootfs, dest)
+ }
+ if err := unix.Mount(m.Source, dest, m.Device, uintptr(m.Flags|unix.MS_REMOUNT), ""); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Do the mount operation followed by additional mounts required to take care
+// of propagation flags.
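+// For example, a PropagationFlags entry of unix.MS_PRIVATE results in a
+// follow-up mount("", dest, "", MS_PRIVATE, "") after the primary mount.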
+func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error {
+ var (
+ dest = m.Destination
+ data = label.FormatMountLabel(m.Data, mountLabel)
+ flags = m.Flags
+ )
+ if libcontainerUtils.CleanPath(dest) == "/dev" {
+ flags &= ^unix.MS_RDONLY
+ }
+
+ copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP
+ if !(copyUp || strings.HasPrefix(dest, rootfs)) {
+ dest = filepath.Join(rootfs, dest)
+ }
+
+ if err := unix.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil {
+ return err
+ }
+
+ for _, pflag := range m.PropagationFlags {
+ if err := unix.Mount("", dest, "", uintptr(pflag), ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go
new file mode 100644
index 000000000..ded5a6bbc
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go
@@ -0,0 +1,76 @@
+package seccomp
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var operators = map[string]configs.Operator{
+ "SCMP_CMP_NE": configs.NotEqualTo,
+ "SCMP_CMP_LT": configs.LessThan,
+ "SCMP_CMP_LE": configs.LessThanOrEqualTo,
+ "SCMP_CMP_EQ": configs.EqualTo,
+ "SCMP_CMP_GE": configs.GreaterThanOrEqualTo,
+ "SCMP_CMP_GT": configs.GreaterThan,
+ "SCMP_CMP_MASKED_EQ": configs.MaskEqualTo,
+}
+
+var actions = map[string]configs.Action{
+ "SCMP_ACT_KILL": configs.Kill,
+ "SCMP_ACT_ERRNO": configs.Errno,
+ "SCMP_ACT_TRAP": configs.Trap,
+ "SCMP_ACT_ALLOW": configs.Allow,
+ "SCMP_ACT_TRACE": configs.Trace,
+}
+
+var archs = map[string]string{
+ "SCMP_ARCH_X86": "x86",
+ "SCMP_ARCH_X86_64": "amd64",
+ "SCMP_ARCH_X32": "x32",
+ "SCMP_ARCH_ARM": "arm",
+ "SCMP_ARCH_AARCH64": "arm64",
+ "SCMP_ARCH_MIPS": "mips",
+ "SCMP_ARCH_MIPS64": "mips64",
+ "SCMP_ARCH_MIPS64N32": "mips64n32",
+ "SCMP_ARCH_MIPSEL": "mipsel",
+ "SCMP_ARCH_MIPSEL64": "mipsel64",
+ "SCMP_ARCH_MIPSEL64N32": "mipsel64n32",
+ "SCMP_ARCH_PPC": "ppc",
+ "SCMP_ARCH_PPC64": "ppc64",
+ "SCMP_ARCH_PPC64LE": "ppc64le",
+ "SCMP_ARCH_S390": "s390",
+ "SCMP_ARCH_S390X": "s390x",
+}
+
+// ConvertStringToOperator converts a string into a Seccomp comparison operator.
+// Comparison operators use the names they are assigned by Libseccomp's header.
+// Attempting to convert a string that is not a valid operator results in an
+// error.
+func ConvertStringToOperator(in string) (configs.Operator, error) {
+ if op, ok := operators[in]; ok {
+ return op, nil
+ }
+ return 0, fmt.Errorf("string %s is not a valid operator for seccomp", in)
+}
+
+// ConvertStringToAction converts a string into a Seccomp rule match action.
+// Actions use the names they are assigned in Libseccomp's header, though some
+// (notably, SCMP_ACT_TRACE) are not available in this implementation and will
+// return errors.
+// Attempting to convert a string that is not a valid action results in an
+// error.
+func ConvertStringToAction(in string) (configs.Action, error) {
+ if act, ok := actions[in]; ok {
+ return act, nil
+ }
+ return 0, fmt.Errorf("string %s is not a valid action for seccomp", in)
+}
+
+// ConvertStringToArch converts a string into a Seccomp comparison arch.
+func ConvertStringToArch(in string) (string, error) {
+ if arch, ok := archs[in]; ok {
+ return arch, nil
+ }
+ return "", fmt.Errorf("string %s is not a valid arch for seccomp", in)
+}
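+
+// Example (a sketch):
+//
+//   act, err := ConvertStringToAction("SCMP_ACT_ERRNO") // configs.Errno, nil
+//   _, err = ConvertStringToAction("SCMP_ACT_BOGUS")    // error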
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
new file mode 100644
index 000000000..2523cbf99
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
@@ -0,0 +1,227 @@
+// +build linux,cgo,seccomp
+
+package seccomp
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ actAllow = libseccomp.ActAllow
+ actTrap = libseccomp.ActTrap
+ actKill = libseccomp.ActKill
+ actTrace = libseccomp.ActTrace.SetReturnCode(int16(unix.EPERM))
+ actErrno = libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM))
+)
+
+// InitSeccomp installs a filter for the given syscalls in a container,
+// preventing them from being used. It runs in the container's init process
+// and is inherited by all child processes. Processes that join via setns,
+// however, need a separate invocation, since they are not children of init
+// until they enter the namespaces.
+func InitSeccomp(config *configs.Seccomp) error {
+ if config == nil {
+ return fmt.Errorf("cannot initialize Seccomp - nil config passed")
+ }
+
+ defaultAction, err := getAction(config.DefaultAction)
+ if err != nil {
+ return fmt.Errorf("error initializing seccomp - invalid default action")
+ }
+
+ filter, err := libseccomp.NewFilter(defaultAction)
+ if err != nil {
+ return fmt.Errorf("error creating filter: %s", err)
+ }
+
+ // Add extra architectures
+ for _, arch := range config.Architectures {
+ scmpArch, err := libseccomp.GetArchFromString(arch)
+ if err != nil {
+ return err
+ }
+
+ if err := filter.AddArch(scmpArch); err != nil {
+ return err
+ }
+ }
+
+ // Unset no new privs bit
+ if err := filter.SetNoNewPrivsBit(false); err != nil {
+ return fmt.Errorf("error setting no new privileges: %s", err)
+ }
+
+ // Add a rule for each syscall
+ for _, call := range config.Syscalls {
+ if call == nil {
+ return fmt.Errorf("encountered nil syscall while initializing Seccomp")
+ }
+
+ if err = matchCall(filter, call); err != nil {
+ return err
+ }
+ }
+
+ if err = filter.Load(); err != nil {
+ return fmt.Errorf("error loading seccomp filter into kernel: %s", err)
+ }
+
+ return nil
+}
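+
+// A minimal filter (a sketch; any other configs.Seccomp fields are assumed
+// to be zero values):
+//
+//   configs.Seccomp{
+//           DefaultAction: configs.Allow,
+//           Syscalls:      []*configs.Syscall{{Name: "ptrace", Action: configs.Errno}},
+//   }
+//
+// loads a filter that allows everything except ptrace(2), which fails with
+// EPERM (see actErrno above).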
+
+// IsEnabled reports whether the kernel has been configured to support seccomp.
+func IsEnabled() bool {
+ // Try to read from /proc/self/status for kernels > 3.8
+ s, err := parseStatusFile("/proc/self/status")
+ if err != nil {
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+ return true
+ }
+ }
+ return false
+ }
+ _, ok := s["Seccomp"]
+ return ok
+}
+
+// Convert Libcontainer Action to Libseccomp ScmpAction
+func getAction(act configs.Action) (libseccomp.ScmpAction, error) {
+ switch act {
+ case configs.Kill:
+ return actKill, nil
+ case configs.Errno:
+ return actErrno, nil
+ case configs.Trap:
+ return actTrap, nil
+ case configs.Allow:
+ return actAllow, nil
+ case configs.Trace:
+ return actTrace, nil
+ default:
+ return libseccomp.ActInvalid, fmt.Errorf("invalid action, cannot use in rule")
+ }
+}
+
+// Convert Libcontainer Operator to Libseccomp ScmpCompareOp
+func getOperator(op configs.Operator) (libseccomp.ScmpCompareOp, error) {
+ switch op {
+ case configs.EqualTo:
+ return libseccomp.CompareEqual, nil
+ case configs.NotEqualTo:
+ return libseccomp.CompareNotEqual, nil
+ case configs.GreaterThan:
+ return libseccomp.CompareGreater, nil
+ case configs.GreaterThanOrEqualTo:
+ return libseccomp.CompareGreaterEqual, nil
+ case configs.LessThan:
+ return libseccomp.CompareLess, nil
+ case configs.LessThanOrEqualTo:
+ return libseccomp.CompareLessOrEqual, nil
+ case configs.MaskEqualTo:
+ return libseccomp.CompareMaskedEqual, nil
+ default:
+ return libseccomp.CompareInvalid, fmt.Errorf("invalid operator, cannot use in rule")
+ }
+}
+
+// Convert Libcontainer Arg to Libseccomp ScmpCondition
+func getCondition(arg *configs.Arg) (libseccomp.ScmpCondition, error) {
+ cond := libseccomp.ScmpCondition{}
+
+ if arg == nil {
+ return cond, fmt.Errorf("cannot convert nil to syscall condition")
+ }
+
+ op, err := getOperator(arg.Op)
+ if err != nil {
+ return cond, err
+ }
+
+ return libseccomp.MakeCondition(arg.Index, op, arg.Value, arg.ValueTwo)
+}
+
+// Add a rule to match a single syscall
+func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
+ if call == nil || filter == nil {
+ return fmt.Errorf("cannot use nil as syscall to block")
+ }
+
+ if len(call.Name) == 0 {
+ return fmt.Errorf("empty string is not a valid syscall")
+ }
+
+ // If we can't resolve the syscall, assume it's not supported on this
+ // kernel: ignore it rather than erroring out.
+ callNum, err := libseccomp.GetSyscallFromName(call.Name)
+ if err != nil {
+ return nil
+ }
+
+ // Convert the call's action to the libseccomp equivalent
+ callAct, err := getAction(call.Action)
+ if err != nil {
+ return err
+ }
+
+ // Unconditional match - just add the rule
+ if len(call.Args) == 0 {
+ if err = filter.AddRule(callNum, callAct); err != nil {
+ return err
+ }
+ } else {
+ // Conditional match - convert the per-arg rules into library format
+ conditions := []libseccomp.ScmpCondition{}
+
+ for _, cond := range call.Args {
+ newCond, err := getCondition(cond)
+ if err != nil {
+ return err
+ }
+
+ conditions = append(conditions, newCond)
+ }
+
+ if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func parseStatusFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ status := make(map[string]string)
+
+ for s.Scan() {
+ text := s.Text()
+ parts := strings.Split(text, ":")
+
+ if len(parts) <= 1 {
+ continue
+ }
+
+ status[parts[0]] = parts[1]
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
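+
+// Note: the value keeps everything after the first ":" (including leading
+// whitespace in lines like "Seccomp:\t2"); IsEnabled only checks that the
+// key is present.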
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
new file mode 100644
index 000000000..44df1ad4c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
@@ -0,0 +1,24 @@
+// +build !linux !cgo !seccomp
+
+package seccomp
+
+import (
+ "errors"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var ErrSeccompNotEnabled = errors.New("seccomp: config provided but seccomp not supported")
+
+// InitSeccomp does nothing because seccomp is not supported.
+func InitSeccomp(config *configs.Seccomp) error {
+ if config != nil {
+ return ErrSeccompNotEnabled
+ }
+ return nil
+}
+
+// IsEnabled returns false, because it is not supported.
+func IsEnabled() bool {
+ return false
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
new file mode 100644
index 000000000..c7bdb605a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
@@ -0,0 +1,11 @@
+// +build linux,go1.5
+
+package libcontainer
+
+import "syscall"
+
+// enableSetgroups sets the GidMappingsEnableSetgroups member to true, so the
+// process's setgroups proc entry won't be set to 'deny' if GidMappings are set.
+func enableSetgroups(sys *syscall.SysProcAttr) {
+ sys.GidMappingsEnableSetgroups = true
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
new file mode 100644
index 000000000..35b84219c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
@@ -0,0 +1,65 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/apparmor"
+ "github.com/opencontainers/runc/libcontainer/keys"
+ "github.com/opencontainers/runc/libcontainer/seccomp"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/selinux/go-selinux/label"
+
+ "golang.org/x/sys/unix"
+)
+
+// linuxSetnsInit performs the container's initialization for running a new process
+// inside an existing container.
+type linuxSetnsInit struct {
+ pipe *os.File
+ consoleSocket *os.File
+ config *initConfig
+}
+
+func (l *linuxSetnsInit) getSessionRingName() string {
+ return fmt.Sprintf("_ses.%s", l.config.ContainerId)
+}
+
+func (l *linuxSetnsInit) Init() error {
+ if !l.config.Config.NoNewKeyring {
+ // do not inherit the parent's session keyring
+ if _, err := keys.JoinSessionKeyring(l.getSessionRingName()); err != nil {
+ return err
+ }
+ }
+ if l.config.CreateConsole {
+ if err := setupConsole(l.consoleSocket, l.config, false); err != nil {
+ return err
+ }
+ if err := system.Setctty(); err != nil {
+ return err
+ }
+ }
+ if l.config.NoNewPrivileges {
+ if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+ return err
+ }
+ }
+ if l.config.Config.Seccomp != nil {
+ if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
+ return err
+ }
+ }
+ if err := finalizeNamespace(l.config); err != nil {
+ return err
+ }
+ if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
+ return err
+ }
+ if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil {
+ return err
+ }
+ return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ())
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go
new file mode 100644
index 000000000..0bbe14950
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go
@@ -0,0 +1,27 @@
+package stacktrace
+
+import "runtime"
+
+// Capture returns a stacktrace for the currently executing Go program.
+//
+// userSkip is the number of stack frames to skip before capturing.
+func Capture(userSkip int) Stacktrace {
+ var (
+ skip = userSkip + 1 // add one for our own function
+ frames []Frame
+ prevPc uintptr
+ )
+ for i := skip; ; i++ {
+ pc, file, line, ok := runtime.Caller(i)
+ // detect if the caller repeats, to avoid an infinite loop; gccgo
+ // currently loops without this check
+ if !ok || pc == prevPc {
+ break
+ }
+ frames = append(frames, NewFrame(pc, file, line))
+ prevPc = pc
+ }
+ return Stacktrace{
+ Frames: frames,
+ }
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go
new file mode 100644
index 000000000..0d590d9a5
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go
@@ -0,0 +1,38 @@
+package stacktrace
+
+import (
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// NewFrame returns a new stack frame for the provided information
+func NewFrame(pc uintptr, file string, line int) Frame {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return Frame{}
+ }
+ pack, name := parseFunctionName(fn.Name())
+ return Frame{
+ Line: line,
+ File: filepath.Base(file),
+ Package: pack,
+ Function: name,
+ }
+}
+
+func parseFunctionName(name string) (string, string) {
+ i := strings.LastIndex(name, ".")
+ if i == -1 {
+ return "", name
+ }
+ return name[:i], name[i+1:]
+}
+
+// Frame contains all the information for a stack frame within a go program
+type Frame struct {
+ File string
+ Function string
+ Package string
+ Line int
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go
new file mode 100644
index 000000000..5e8b58d2d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go
@@ -0,0 +1,5 @@
+package stacktrace
+
+type Stacktrace struct {
+ Frames []Frame
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go
new file mode 100644
index 000000000..580b3fe45
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go
@@ -0,0 +1,188 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall" //only for Exec
+
+ "github.com/opencontainers/runc/libcontainer/apparmor"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/keys"
+ "github.com/opencontainers/runc/libcontainer/seccomp"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/selinux/go-selinux/label"
+
+ "golang.org/x/sys/unix"
+)
+
+type linuxStandardInit struct {
+ pipe *os.File
+ consoleSocket *os.File
+ parentPid int
+ stateDirFD int
+ config *initConfig
+}
+
+func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) {
+ var newperms uint32
+
+ if l.config.Config.Namespaces.Contains(configs.NEWUSER) {
+ // with user ns we need 'other' search permissions
+ newperms = 0x8
+ } else {
+ // without user ns we need 'UID' search permissions
+ newperms = 0x80000
+ }
+
+ // create a unique per-container session keyring name that we can join
+ // in setns; note, however, that other containers can also join it
+ return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms
+}
+
+func (l *linuxStandardInit) Init() error {
+ if !l.config.Config.NoNewKeyring {
+ ringname, keepperms, newperms := l.getSessionRingParams()
+
+ // do not inherit the parent's session keyring
+ sessKeyId, err := keys.JoinSessionKeyring(ringname)
+ if err != nil {
+ return err
+ }
+ // make the session keyring searchable
+ if err := keys.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil {
+ return err
+ }
+ }
+
+ if err := setupNetwork(l.config); err != nil {
+ return err
+ }
+ if err := setupRoute(l.config.Config); err != nil {
+ return err
+ }
+
+ label.Init()
+
+ // prepareRootfs() can be executed only for a new mount namespace.
+ if l.config.Config.Namespaces.Contains(configs.NEWNS) {
+ if err := prepareRootfs(l.pipe, l.config.Config); err != nil {
+ return err
+ }
+ }
+
+ // Set up the console. This has to be done *before* we finalize the rootfs,
+ // but *after* we've given the user the chance to set up all of the mounts
+ // they wanted.
+ if l.config.CreateConsole {
+ if err := setupConsole(l.consoleSocket, l.config, true); err != nil {
+ return err
+ }
+ if err := system.Setctty(); err != nil {
+ return err
+ }
+ }
+
+ // Finish the rootfs setup.
+ if l.config.Config.Namespaces.Contains(configs.NEWNS) {
+ if err := finalizeRootfs(l.config.Config); err != nil {
+ return err
+ }
+ }
+
+ if hostname := l.config.Config.Hostname; hostname != "" {
+ if err := unix.Sethostname([]byte(hostname)); err != nil {
+ return err
+ }
+ }
+ if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
+ return err
+ }
+ if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil {
+ return err
+ }
+
+ for key, value := range l.config.Config.Sysctl {
+ if err := writeSystemProperty(key, value); err != nil {
+ return err
+ }
+ }
+ for _, path := range l.config.Config.ReadonlyPaths {
+ if err := readonlyPath(path); err != nil {
+ return err
+ }
+ }
+ for _, path := range l.config.Config.MaskPaths {
+ if err := maskPath(path); err != nil {
+ return err
+ }
+ }
+ pdeath, err := system.GetParentDeathSignal()
+ if err != nil {
+ return err
+ }
+ if l.config.NoNewPrivileges {
+ if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+ return err
+ }
+ }
+ // Tell our parent that we're ready to Execv. This must be done before the
+ // Seccomp rules have been applied, because we need to be able to read and
+ // write to a socket.
+ if err := syncParentReady(l.pipe); err != nil {
+ return err
+ }
+ // Without NoNewPrivileges seccomp is a privileged operation, so we need to
+ // do this before dropping capabilities; otherwise do it as late as possible
+ // just before execve so as few syscalls take place after it as possible.
+ if l.config.Config.Seccomp != nil && !l.config.NoNewPrivileges {
+ if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
+ return err
+ }
+ }
+ if err := finalizeNamespace(l.config); err != nil {
+ return err
+ }
+ // finalizeNamespace can change user/group which clears the parent death
+ // signal, so we restore it here.
+ if err := pdeath.Restore(); err != nil {
+ return err
+ }
+ // Compare the current parent with the one recorded when the init process
+ // started and make sure it did not change. If it changed, the original
+ // parent died and we were reparented; kill ourselves rather than cause
+ // problems for the new parent.
+ if unix.Getppid() != l.parentPid {
+ return unix.Kill(unix.Getpid(), unix.SIGKILL)
+ }
+ // check for the arg before waiting to make sure it exists and it is returned
+ // as a create time error.
+ name, err := exec.LookPath(l.config.Args[0])
+ if err != nil {
+ return err
+ }
+ // close the pipe to signal that we have completed our init.
+ l.pipe.Close()
+ // wait for the fifo to be opened on the other side before
+ // exec'ing the users process.
+ fd, err := unix.Openat(l.stateDirFD, execFifoFilename, os.O_WRONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return newSystemErrorWithCause(err, "openat exec fifo")
+ }
+ if _, err := unix.Write(fd, []byte("0")); err != nil {
+ return newSystemErrorWithCause(err, "write 0 exec fifo")
+ }
+ if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges {
+ if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
+ return newSystemErrorWithCause(err, "init seccomp")
+ }
+ }
+ // close the statedir fd before exec because the kernel resets dumpable in the wrong order
+ // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318
+ unix.Close(l.stateDirFD)
+ if err := syscall.Exec(name, l.config.Args[0:], os.Environ()); err != nil {
+ return newSystemErrorWithCause(err, "exec user process")
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go
new file mode 100644
index 000000000..44fa6b43a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go
@@ -0,0 +1,248 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/utils"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func newStateTransitionError(from, to containerState) error {
+ return &stateTransitionError{
+ From: from.status().String(),
+ To: to.status().String(),
+ }
+}
+
+// stateTransitionError is returned when an invalid state transition happens from one
+// state to another.
+type stateTransitionError struct {
+ From string
+ To string
+}
+
+func (s *stateTransitionError) Error() string {
+ return fmt.Sprintf("invalid state transition from %s to %s", s.From, s.To)
+}
+
+type containerState interface {
+ transition(containerState) error
+ destroy() error
+ status() Status
+}
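+
+// The legal transitions form a small state machine: created -> running,
+// paused, or stopped; running <-> paused; running -> stopped; and
+// stopped -> running or restored. Anything else yields a
+// stateTransitionError.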
+
+func destroy(c *linuxContainer) error {
+ if !c.config.Namespaces.Contains(configs.NEWPID) {
+ if err := signalAllProcesses(c.cgroupManager, unix.SIGKILL); err != nil {
+ logrus.Warn(err)
+ }
+ }
+ err := c.cgroupManager.Destroy()
+ if rerr := os.RemoveAll(c.root); err == nil {
+ err = rerr
+ }
+ c.initProcess = nil
+ if herr := runPoststopHooks(c); err == nil {
+ err = herr
+ }
+ c.state = &stoppedState{c: c}
+ return err
+}
+
+func runPoststopHooks(c *linuxContainer) error {
+ if c.config.Hooks != nil {
+ s := configs.HookState{
+ Version: c.config.Version,
+ ID: c.id,
+ Bundle: utils.SearchLabels(c.config.Labels, "bundle"),
+ }
+ for _, hook := range c.config.Hooks.Poststop {
+ if err := hook.Run(s); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// stoppedState represents a container in a stopped/destroyed state.
+type stoppedState struct {
+ c *linuxContainer
+}
+
+func (b *stoppedState) status() Status {
+ return Stopped
+}
+
+func (b *stoppedState) transition(s containerState) error {
+ switch s.(type) {
+ case *runningState, *restoredState:
+ b.c.state = s
+ return nil
+ case *stoppedState:
+ return nil
+ }
+ return newStateTransitionError(b, s)
+}
+
+func (b *stoppedState) destroy() error {
+ return destroy(b.c)
+}
+
+// runningState represents a container that is currently running.
+type runningState struct {
+ c *linuxContainer
+}
+
+func (r *runningState) status() Status {
+ return Running
+}
+
+func (r *runningState) transition(s containerState) error {
+ switch s.(type) {
+ case *stoppedState:
+ t, err := r.c.runType()
+ if err != nil {
+ return err
+ }
+ if t == Running {
+ return newGenericError(fmt.Errorf("container still running"), ContainerNotStopped)
+ }
+ r.c.state = s
+ return nil
+ case *pausedState:
+ r.c.state = s
+ return nil
+ case *runningState:
+ return nil
+ }
+ return newStateTransitionError(r, s)
+}
+
+func (r *runningState) destroy() error {
+ t, err := r.c.runType()
+ if err != nil {
+ return err
+ }
+ if t == Running {
+ return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped)
+ }
+ return destroy(r.c)
+}
+
+type createdState struct {
+ c *linuxContainer
+}
+
+func (i *createdState) status() Status {
+ return Created
+}
+
+func (i *createdState) transition(s containerState) error {
+ switch s.(type) {
+ case *runningState, *pausedState, *stoppedState:
+ i.c.state = s
+ return nil
+ case *createdState:
+ return nil
+ }
+ return newStateTransitionError(i, s)
+}
+
+func (i *createdState) destroy() error {
+ i.c.initProcess.signal(unix.SIGKILL)
+ return destroy(i.c)
+}
+
+// pausedState represents a container that is currently paused. It cannot be
+// destroyed while paused and must transition back to running first.
+type pausedState struct {
+ c *linuxContainer
+}
+
+func (p *pausedState) status() Status {
+ return Paused
+}
+
+func (p *pausedState) transition(s containerState) error {
+ switch s.(type) {
+ case *runningState, *stoppedState:
+ p.c.state = s
+ return nil
+ case *pausedState:
+ return nil
+ }
+ return newStateTransitionError(p, s)
+}
+
+func (p *pausedState) destroy() error {
+ t, err := p.c.runType()
+ if err != nil {
+ return err
+ }
+ if t != Running && t != Created {
+ if err := p.c.cgroupManager.Freeze(configs.Thawed); err != nil {
+ return err
+ }
+ return destroy(p.c)
+ }
+ return newGenericError(fmt.Errorf("container is paused"), ContainerPaused)
+}
+
+// restoredState is the same as the running state, but it also carries
+// checkpoint information that may need to be cleaned up when the container
+// is stopped and destroy is called.
+type restoredState struct {
+ imageDir string
+ c *linuxContainer
+}
+
+func (r *restoredState) status() Status {
+ return Running
+}
+
+func (r *restoredState) transition(s containerState) error {
+ switch s.(type) {
+ case *stoppedState, *runningState:
+ return nil
+ }
+ return newStateTransitionError(r, s)
+}
+
+func (r *restoredState) destroy() error {
+ if _, err := os.Stat(filepath.Join(r.c.root, "checkpoint")); err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
+ return destroy(r.c)
+}
+
+// loadedState is used whenever a container is restored, loaded, or running
+// additional processes inside; such a container should not be destroyed when
+// the process exits.
+type loadedState struct {
+ c *linuxContainer
+ s Status
+}
+
+func (n *loadedState) status() Status {
+ return n.s
+}
+
+func (n *loadedState) transition(s containerState) error {
+ n.c.state = s
+ return nil
+}
+
+func (n *loadedState) destroy() error {
+ if err := n.c.refreshState(); err != nil {
+ return err
+ }
+ return n.c.state.destroy()
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/stats.go
new file mode 100644
index 000000000..303e4b94c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stats.go
@@ -0,0 +1,15 @@
+package libcontainer
+
+type NetworkInterface struct {
+ // Name is the name of the network interface.
+ Name string
+
+ RxBytes uint64
+ RxPackets uint64
+ RxErrors uint64
+ RxDropped uint64
+ TxBytes uint64
+ TxPackets uint64
+ TxErrors uint64
+ TxDropped uint64
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go
new file mode 100644
index 000000000..f8d1d689c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go
@@ -0,0 +1,5 @@
+package libcontainer
+
+type Stats struct {
+ Interfaces []*NetworkInterface
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go
new file mode 100644
index 000000000..c629dc67d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go
@@ -0,0 +1,8 @@
+package libcontainer
+
+import "github.com/opencontainers/runc/libcontainer/cgroups"
+
+type Stats struct {
+ Interfaces []*NetworkInterface
+ CgroupStats *cgroups.Stats
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go
new file mode 100644
index 000000000..da78c1c2e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go
@@ -0,0 +1,7 @@
+package libcontainer
+
+// Solaris - TODO
+
+type Stats struct {
+ Interfaces []*NetworkInterface
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go
new file mode 100644
index 000000000..f8d1d689c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go
@@ -0,0 +1,5 @@
+package libcontainer
+
+type Stats struct {
+ Interfaces []*NetworkInterface
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/sync.go b/vendor/github.com/opencontainers/runc/libcontainer/sync.go
new file mode 100644
index 000000000..cf7b45bc3
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/sync.go
@@ -0,0 +1,107 @@
+package libcontainer
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+type syncType string
+
+// Constants that are used for synchronisation between the parent and child
+// during container setup. They come in pairs (with procError being a generic
+// response which is followed by a &genericError).
+//
+// [ child ] <-> [ parent ]
+//
+// procHooks --> [run hooks]
+// <-- procResume
+//
+// procConsole -->
+// <-- procConsoleReq
+// [send(fd)] --> [recv(fd)]
+// <-- procConsoleAck
+//
+// procReady --> [final setup]
+// <-- procRun
+const (
+ procError syncType = "procError"
+ procReady syncType = "procReady"
+ procRun syncType = "procRun"
+ procHooks syncType = "procHooks"
+ procResume syncType = "procResume"
+)
+
+type syncT struct {
+ Type syncType `json:"type"`
+}
+
+// writeSync is used to write to a synchronisation pipe. An error is returned
+// if there was a problem writing the payload.
+func writeSync(pipe io.Writer, sync syncType) error {
+ if err := utils.WriteJSON(pipe, syncT{sync}); err != nil {
+ return err
+ }
+ return nil
+}
+
+// readSync is used to read from a synchronisation pipe. An error is returned
+// if we got a genericError, the pipe was closed, or we got an unexpected flag.
+func readSync(pipe io.Reader, expected syncType) error {
+ var procSync syncT
+ if err := json.NewDecoder(pipe).Decode(&procSync); err != nil {
+ if err == io.EOF {
+ return fmt.Errorf("parent closed synchronisation channel")
+ }
+ return err
+ }
+
+ // With the flag decoded, check for an error response or an unexpected flag.
+ if procSync.Type == procError {
+ var ierr genericError
+
+ if err := json.NewDecoder(pipe).Decode(&ierr); err != nil {
+ return fmt.Errorf("failed reading error from parent: %v", err)
+ }
+
+ return &ierr
+ }
+
+ if procSync.Type != expected {
+ return fmt.Errorf("invalid synchronisation flag from parent")
+ }
+ return nil
+}
+
+// parseSync runs the given callback function on each syncT received from the
+// child. It will return once io.EOF is returned from the given pipe.
+func parseSync(pipe io.Reader, fn func(*syncT) error) error {
+ dec := json.NewDecoder(pipe)
+ for {
+ var sync syncT
+ if err := dec.Decode(&sync); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ // We handle this case outside fn for cleanliness reasons.
+ var ierr *genericError
+ if sync.Type == procError {
+ if err := dec.Decode(&ierr); err != nil && err != io.EOF {
+ return newSystemErrorWithCause(err, "decoding proc error from init")
+ }
+ if ierr != nil {
+ return ierr
+ }
+ // Programmer error.
+ panic("No error following JSON procError payload.")
+ }
+
+ if err := fn(&sync); err != nil {
+ return err
+ }
+ }
+ return nil
+}
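+
+// Typical parent-side use (a sketch following the protocol above):
+//
+//   err := parseSync(pipe, func(sync *syncT) error {
+//           switch sync.Type {
+//           case procReady:
+//                   return writeSync(pipe, procRun)
+//           case procHooks:
+//                   // run the prestart hooks, then:
+//                   return writeSync(pipe, procResume)
+//           default:
+//                   return fmt.Errorf("unexpected sync type %q", sync.Type)
+//           }
+//   })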
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
new file mode 100644
index 000000000..4837085a7
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
@@ -0,0 +1,136 @@
+// +build linux
+
+package system
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall" // only for exec
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// If arg2 is nonzero, set the "child subreaper" attribute of the
+// calling process; if arg2 is zero, unset the attribute. When a
+// process is marked as a child subreaper, all of the children
+// that it creates, and their descendants, will be marked as
+// having a subreaper. In effect, a subreaper fulfills the role
+// of init(1) for its descendant processes. Upon termination of
+// a process that is orphaned (i.e., its immediate parent has
+// already terminated) and marked as having a subreaper, the
+// nearest still living ancestor subreaper will receive a SIGCHLD
+// signal and be able to wait(2) on the process to discover its
+// termination status.
+const PR_SET_CHILD_SUBREAPER = 36
+
+type ParentDeathSignal int
+
+func (p ParentDeathSignal) Restore() error {
+ if p == 0 {
+ return nil
+ }
+ current, err := GetParentDeathSignal()
+ if err != nil {
+ return err
+ }
+ if p == current {
+ return nil
+ }
+ return p.Set()
+}
+
+func (p ParentDeathSignal) Set() error {
+ return SetParentDeathSignal(uintptr(p))
+}
+
+func Execv(cmd string, args []string, env []string) error {
+ name, err := exec.LookPath(cmd)
+ if err != nil {
+ return err
+ }
+
+ return syscall.Exec(name, args, env)
+}
+
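+// Prlimit sets a resource limit for the given pid via prlimit64(2) and reads
+// the previous limit back into the same struct; pid 0 targets the calling
+// process.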
+func Prlimit(pid, resource int, limit unix.Rlimit) error {
+ _, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func SetParentDeathSignal(sig uintptr) error {
+ if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func GetParentDeathSignal() (ParentDeathSignal, error) {
+ var sig int
+ if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil {
+ return -1, err
+ }
+ return ParentDeathSignal(sig), nil
+}
+
+func SetKeepCaps() error {
+ if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func ClearKeepCaps() error {
+ if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func Setctty() error {
+ if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+// RunningInUserNS detects whether we are currently running in a user namespace.
+// Copied from github.com/lxc/lxd/shared/util.go
+func RunningInUserNS() bool {
+ file, err := os.Open("/proc/self/uid_map")
+ if err != nil {
+ // This kernel-provided file only exists if user namespaces are supported
+ return false
+ }
+ defer file.Close()
+
+ buf := bufio.NewReader(file)
+ l, _, err := buf.ReadLine()
+ if err != nil {
+ return false
+ }
+
+ line := string(l)
+ var a, b, c int64
+ fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
+ /*
+ * We assume we are in the initial user namespace if we have a full
+ * range - 4294967295 uids starting at uid 0.
+ */
+ if a == 0 && b == 0 && c == 4294967295 {
+ return false
+ }
+ return true
+}
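+
+// For example, /proc/self/uid_map reads "0 0 4294967295" in the initial user
+// namespace, while a mapped container might show "0 100000 65536".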
+
+// SetSubreaper sets the value i as the subreaper setting for the calling process
+func SetSubreaper(i int) error {
+ return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
new file mode 100644
index 000000000..79232a437
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
@@ -0,0 +1,113 @@
+package system
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// State is the status of a process.
+type State rune
+
+const ( // Only values for Linux 3.14 and later are listed here
+ Dead State = 'X'
+ DiskSleep State = 'D'
+ Running State = 'R'
+ Sleeping State = 'S'
+ Stopped State = 'T'
+ TracingStop State = 't'
+ Zombie State = 'Z'
+)
+
+// String returns the string form of the state from proc(5)'s documentation
+// for the "State" field of /proc/[pid]/status.
+func (s State) String() string {
+ switch s {
+ case Dead:
+ return "dead"
+ case DiskSleep:
+ return "disk sleep"
+ case Running:
+ return "running"
+ case Sleeping:
+ return "sleeping"
+ case Stopped:
+ return "stopped"
+ case TracingStop:
+ return "tracing stop"
+ case Zombie:
+ return "zombie"
+ default:
+ return fmt.Sprintf("unknown (%c)", s)
+ }
+}
+
+// Stat_t represents the information from /proc/[pid]/stat, as
+// described in proc(5) with names based on the /proc/[pid]/status
+// fields.
+type Stat_t struct {
+ // PID is the process ID.
+ PID uint
+
+ // Name is the command run by the process.
+ Name string
+
+ // State is the state of the process.
+ State State
+
+ // StartTime is the number of clock ticks after system boot (since
+ // Linux 2.6).
+ StartTime uint64
+}
+
+// Stat returns a Stat_t instance for the specified process.
+func Stat(pid int) (stat Stat_t, err error) {
+ bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
+ if err != nil {
+ return stat, err
+ }
+ return parseStat(string(bytes))
+}
+
+// GetProcessStartTime returns the start time of the process with the given
+// pid, in clock ticks after system boot.
+//
+// Deprecated: use Stat(pid) and Stat_t.StartTime instead.
+func GetProcessStartTime(pid int) (string, error) {
+ stat, err := Stat(pid)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%d", stat.StartTime), nil
+}
+
+func parseStat(data string) (stat Stat_t, err error) {
+ // From proc(5), field 2 could contain space and is inside `(` and `)`.
+ // The following is an example:
+ // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+ i := strings.LastIndex(data, ")")
+ if i <= 2 || i >= len(data)-1 {
+ return stat, fmt.Errorf("invalid stat data: %q", data)
+ }
+
+ parts := strings.SplitN(data[:i], "(", 2)
+ if len(parts) != 2 {
+ return stat, fmt.Errorf("invalid stat data: %q", data)
+ }
+
+ stat.Name = parts[1]
+ _, err = fmt.Sscanf(parts[0], "%d", &stat.PID)
+ if err != nil {
+ return stat, err
+ }
+
+	// parts indexes should be offset by 3 from the field numbers given in
+	// proc(5), because parts is zero-indexed and we've removed fields one
+	// (PID) and two (Name) in the paren-split above.
+ parts = strings.Split(data[i+2:], " ")
+ var state int
+ fmt.Sscanf(parts[3-3], "%c", &state)
+ stat.State = State(state)
+ fmt.Sscanf(parts[22-3], "%d", &stat.StartTime)
+ return stat, nil
+}
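For reference, a minimal sketch of reading a process's stat data with this package (import path assumed from the vendor tree; Linux-only):

```go
package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/system"
)

func main() {
	st, err := system.Stat(os.Getpid())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// State implements Stringer, so %s prints e.g. "running".
	fmt.Printf("%d %s (%s), started at tick %d\n", st.PID, st.Name, st.State, st.StartTime)
}
```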
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
new file mode 100644
index 000000000..3f7235ed1
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
@@ -0,0 +1,25 @@
+// +build linux,386
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
new file mode 100644
index 000000000..d7891a2ff
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
@@ -0,0 +1,25 @@
+// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
new file mode 100644
index 000000000..31ff3deb1
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
@@ -0,0 +1,25 @@
+// +build linux,arm
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
new file mode 100644
index 000000000..b3a07cba3
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
@@ -0,0 +1,12 @@
+// +build cgo,linux cgo,freebsd
+
+package system
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
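+// GetClockTicks returns the number of clock ticks per second, as reported
+// by sysconf(_SC_CLK_TCK).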
+func GetClockTicks() int {
+ return int(C.sysconf(C._SC_CLK_TCK))
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
new file mode 100644
index 000000000..d93b5d5fd
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
@@ -0,0 +1,15 @@
+// +build !cgo windows
+
+package system
+
+func GetClockTicks() int {
+ // TODO figure out a better alternative for platforms where we're missing cgo
+ //
+ // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
+ //
+ // An example of its usage can be found here.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
+
+ return 100
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
new file mode 100644
index 000000000..e7cfd62b2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package system
+
+// RunningInUserNS is a stub for non-Linux systems; it always returns false.
+func RunningInUserNS() bool {
+ return false
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
new file mode 100644
index 000000000..a6823fc99
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
@@ -0,0 +1,35 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute attr on path,
+// without following a trailing symlink. It returns the value as a byte
+// slice, or an error if the attribute cannot be read.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ var sz int
+	// Start with a 128-byte buffer.
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+
+ switch {
+ case errno == unix.ENODATA:
+ return nil, errno
+ case errno == unix.ENOTSUP:
+ return nil, errno
+ case errno == unix.ERANGE:
+		// The 128-byte buffer might not be large enough. Query with an
+		// empty buffer to get the real size of the xattr on disk, then
+		// retry with a buffer of that size.
+ sz, errno = unix.Lgetxattr(path, attr, []byte{})
+ if errno != nil {
+ return nil, errno
+ }
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ if errno != nil {
+ return nil, errno
+ }
+ case errno != nil:
+ return nil, errno
+ }
+ return dest[:sz], nil
+}
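Usage is straightforward; a minimal sketch (import path assumed, and the attribute name is only an example, so the call may legitimately fail with ENODATA or ENOTSUP):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/system"
)

func main() {
	// Read the SELinux label of a file, if the filesystem stores one.
	val, err := system.Lgetxattr("/etc/passwd", "security.selinux")
	if err != nil {
		fmt.Println("xattr not readable:", err)
		return
	}
	fmt.Printf("security.selinux = %q\n", val)
}
```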
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go
new file mode 100644
index 000000000..95e9eebc0
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go
@@ -0,0 +1,95 @@
+package user
+
+import (
+ "errors"
+)
+
+var (
+ // The current operating system does not provide the required data for user lookups.
+ ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
+ // No matching entries found in file.
+ ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
+ ErrNoGroupEntries = errors.New("no matching entries in group file")
+)
+
+func lookupUser(filter func(u User) bool) (User, error) {
+ // Get operating system-specific passwd reader-closer.
+ passwd, err := GetPasswd()
+ if err != nil {
+ return User{}, err
+ }
+ defer passwd.Close()
+
+ // Get the users.
+ users, err := ParsePasswdFilter(passwd, filter)
+ if err != nil {
+ return User{}, err
+ }
+
+ // No user entries found.
+ if len(users) == 0 {
+ return User{}, ErrNoPasswdEntries
+ }
+
+ // Assume the first entry is the "correct" one.
+ return users[0], nil
+}
+
+// LookupUser looks up a user by their username in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUser returns an error.
+func LookupUser(username string) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Name == username
+ })
+}
+
+// LookupUid looks up a user by their user id in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUid returns an error.
+func LookupUid(uid int) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Uid == uid
+ })
+}
+
+func lookupGroup(filter func(g Group) bool) (Group, error) {
+ // Get operating system-specific group reader-closer.
+ group, err := GetGroup()
+ if err != nil {
+ return Group{}, err
+ }
+ defer group.Close()
+
+	// Get the groups.
+ groups, err := ParseGroupFilter(group, filter)
+ if err != nil {
+ return Group{}, err
+ }
+
+	// No group entries found.
+ if len(groups) == 0 {
+ return Group{}, ErrNoGroupEntries
+ }
+
+ // Assume the first entry is the "correct" one.
+ return groups[0], nil
+}
+
+// LookupGroup looks up a group by its name in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGroup
+// returns an error.
+func LookupGroup(groupname string) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Name == groupname
+ })
+}
+
+// LookupGid looks up a group by its group id in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGid
+// returns an error.
+func LookupGid(gid int) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Gid == gid
+ })
+}
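A minimal sketch of the lookup API (import path assumed); these lookups read the host's /etc/passwd and /etc/group, so the entries printed here are examples:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	u, err := user.LookupUser("root")
	if err != nil {
		fmt.Println(err) // e.g. ErrNoPasswdEntries
		return
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", u.Uid, u.Gid, u.Home)

	if g, err := user.LookupGid(0); err == nil {
		fmt.Println("gid 0 is", g.Name)
	}
}
```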
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
new file mode 100644
index 000000000..c2bb9ec90
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -0,0 +1,46 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package user
+
+import (
+ "io"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// Unix-specific paths to the passwd- and group-formatted files.
+const (
+ unixPasswdPath = "/etc/passwd"
+ unixGroupPath = "/etc/group"
+)
+
+func GetPasswdPath() (string, error) {
+ return unixPasswdPath, nil
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return os.Open(unixPasswdPath)
+}
+
+func GetGroupPath() (string, error) {
+ return unixGroupPath, nil
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return os.Open(unixGroupPath)
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+ return LookupUid(unix.Getuid())
+}
+
+// CurrentGroup looks up the current process's primary group, by group id,
+// in /etc/group. If the group cannot be found (or there is no /etc/group
+// file on the filesystem), then CurrentGroup returns an error.
+func CurrentGroup() (Group, error) {
+ return LookupGid(unix.Getgid())
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
new file mode 100644
index 000000000..4a8d00acb
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
@@ -0,0 +1,38 @@
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package user
+
+import (
+ "io"
+ "syscall"
+)
+
+func GetPasswdPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
+
+func GetGroupPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+ return LookupUid(syscall.Getuid())
+}
+
+// CurrentGroup looks up the current process's primary group, by group id,
+// in /etc/group. If the group cannot be found (or there is no /etc/group
+// file on the filesystem), then CurrentGroup returns an error.
+func CurrentGroup() (Group, error) {
+ return LookupGid(syscall.Getgid())
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
new file mode 100644
index 000000000..8962cab33
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
@@ -0,0 +1,441 @@
+package user
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ minId = 0
+	maxId = 1<<31 - 1 // for compatibility with 32-bit systems
+)
+
+var (
+ ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId)
+)
+
+type User struct {
+ Name string
+ Pass string
+ Uid int
+ Gid int
+ Gecos string
+ Home string
+ Shell string
+}
+
+type Group struct {
+ Name string
+ Pass string
+ Gid int
+ List []string
+}
+
+func parseLine(line string, v ...interface{}) {
+ if line == "" {
+ return
+ }
+
+ parts := strings.Split(line, ":")
+ for i, p := range parts {
+ // Ignore cases where we don't have enough fields to populate the arguments.
+ // Some configuration files like to misbehave.
+ if len(v) <= i {
+ break
+ }
+
+ // Use the type of the argument to figure out how to parse it, scanf() style.
+ // This is legit.
+ switch e := v[i].(type) {
+ case *string:
+ *e = p
+ case *int:
+ // "numbers", with conversion errors ignored because of some misbehaving configuration files.
+ *e, _ = strconv.Atoi(p)
+ case *[]string:
+ // Comma-separated lists.
+ if p != "" {
+ *e = strings.Split(p, ",")
+ } else {
+ *e = []string{}
+ }
+ default:
+ // Someone goof'd when writing code using this function. Scream so they can hear us.
+ panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e))
+ }
+ }
+}
+
+func ParsePasswdFile(path string) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswd(passwd)
+}
+
+func ParsePasswd(passwd io.Reader) ([]User, error) {
+ return ParsePasswdFilter(passwd, nil)
+}
+
+func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswdFilter(passwd, filter)
+}
+
+func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for passwd-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []User{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ // see: man 5 passwd
+ // name:password:UID:GID:GECOS:directory:shell
+ // Name:Pass:Uid:Gid:Gecos:Home:Shell
+ // root:x:0:0:root:/root:/bin/bash
+ // adm:x:3:4:adm:/var/adm:/bin/false
+ p := User{}
+ parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
+
+func ParseGroupFile(path string) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer group.Close()
+ return ParseGroup(group)
+}
+
+func ParseGroup(group io.Reader) ([]Group, error) {
+ return ParseGroupFilter(group, nil)
+}
+
+func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroupFilter(group, filter)
+}
+
+func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for group-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []Group{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 group
+ // group_name:password:GID:user_list
+ // Name:Pass:Gid:List
+ // root:x:0:root
+ // adm:x:4:root,adm,daemon
+ p := Group{}
+ parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
+
+type ExecUser struct {
+ Uid int
+ Gid int
+ Sgids []int
+ Home string
+}
+
+// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
+// given file paths and uses that data as the arguments to GetExecUser. If the
+// files cannot be opened for any reason, the error is ignored and a nil
+// io.Reader is passed instead.
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
+ var passwd, group io.Reader
+
+ if passwdFile, err := os.Open(passwdPath); err == nil {
+ passwd = passwdFile
+ defer passwdFile.Close()
+ }
+
+ if groupFile, err := os.Open(groupPath); err == nil {
+ group = groupFile
+ defer groupFile.Close()
+ }
+
+ return GetExecUser(userSpec, defaults, passwd, group)
+}
+
+// GetExecUser parses a user specification string (using the passwd and group
+// readers as sources for /etc/passwd and /etc/group data, respectively). In
+// the case of blank fields or missing data from the sources, the values in
+// defaults are used.
+//
+// GetExecUser will return an error if a user or group literal could not be
+// found in any entry in passwd and group respectively.
+//
+// Examples of valid user specifications are:
+// * ""
+// * "user"
+// * "uid"
+// * "user:group"
+// * "uid:gid
+// * "user:gid"
+// * "uid:group"
+//
+// It should be noted that if you specify a numeric user or group id, it will
+// not be evaluated as a username (only the metadata will be filled in). So
+// attempting to parse a user with user.Name = "1337" will produce a user
+// with a UID of 1337.
+func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
+ if defaults == nil {
+ defaults = new(ExecUser)
+ }
+
+ // Copy over defaults.
+ user := &ExecUser{
+ Uid: defaults.Uid,
+ Gid: defaults.Gid,
+ Sgids: defaults.Sgids,
+ Home: defaults.Home,
+ }
+
+ // Sgids slice *cannot* be nil.
+ if user.Sgids == nil {
+ user.Sgids = []int{}
+ }
+
+ // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
+ var userArg, groupArg string
+ parseLine(userSpec, &userArg, &groupArg)
+
+ // Convert userArg and groupArg to be numeric, so we don't have to execute
+ // Atoi *twice* for each iteration over lines.
+ uidArg, uidErr := strconv.Atoi(userArg)
+ gidArg, gidErr := strconv.Atoi(groupArg)
+
+ // Find the matching user.
+ users, err := ParsePasswdFilter(passwd, func(u User) bool {
+ if userArg == "" {
+ // Default to current state of the user.
+ return u.Uid == user.Uid
+ }
+
+ if uidErr == nil {
+ // If the userArg is numeric, always treat it as a UID.
+ return uidArg == u.Uid
+ }
+
+ return u.Name == userArg
+ })
+
+ // If we can't find the user, we have to bail.
+ if err != nil && passwd != nil {
+ if userArg == "" {
+ userArg = strconv.Itoa(user.Uid)
+ }
+ return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
+ }
+
+ var matchedUserName string
+ if len(users) > 0 {
+ // First match wins, even if there's more than one matching entry.
+ matchedUserName = users[0].Name
+ user.Uid = users[0].Uid
+ user.Gid = users[0].Gid
+ user.Home = users[0].Home
+ } else if userArg != "" {
+ // If we can't find a user with the given username, the only other valid
+ // option is if it's a numeric username with no associated entry in passwd.
+
+ if uidErr != nil {
+ // Not numeric.
+ return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
+ }
+ user.Uid = uidArg
+
+ // Must be inside valid uid range.
+ if user.Uid < minId || user.Uid > maxId {
+ return nil, ErrRange
+ }
+
+ // Okay, so it's numeric. We can just roll with this.
+ }
+
+ // On to the groups. If we matched a username, we need to do this because of
+ // the supplementary group IDs.
+ if groupArg != "" || matchedUserName != "" {
+ groups, err := ParseGroupFilter(group, func(g Group) bool {
+ // If the group argument isn't explicit, we'll just search for it.
+ if groupArg == "" {
+ // Check if user is a member of this group.
+ for _, u := range g.List {
+ if u == matchedUserName {
+ return true
+ }
+ }
+ return false
+ }
+
+ if gidErr == nil {
+ // If the groupArg is numeric, always treat it as a GID.
+ return gidArg == g.Gid
+ }
+
+ return g.Name == groupArg
+ })
+ if err != nil && group != nil {
+ return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
+ }
+
+ // Only start modifying user.Gid if it is in explicit form.
+ if groupArg != "" {
+ if len(groups) > 0 {
+ // First match wins, even if there's more than one matching entry.
+ user.Gid = groups[0].Gid
+ } else {
+ // If we can't find a group with the given name, the only other valid
+ // option is if it's a numeric group name with no associated entry in group.
+
+ if gidErr != nil {
+ // Not numeric.
+ return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
+ }
+ user.Gid = gidArg
+
+ // Must be inside valid gid range.
+ if user.Gid < minId || user.Gid > maxId {
+ return nil, ErrRange
+ }
+
+ // Okay, so it's numeric. We can just roll with this.
+ }
+ } else if len(groups) > 0 {
+ // Supplementary group ids only make sense if in the implicit form.
+ user.Sgids = make([]int, len(groups))
+ for i, group := range groups {
+ user.Sgids[i] = group.Gid
+ }
+ }
+ }
+
+ return user, nil
+}
+
+// GetAdditionalGroups looks up a list of groups by name or group id
+// against the given /etc/group formatted data. If a group name cannot
+// be found, an error will be returned. If a group id cannot be found,
+// or the given group data is nil, the id will be returned as-is
+// provided it is in the legal range.
+func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
+ var groups = []Group{}
+ if group != nil {
+ var err error
+ groups, err = ParseGroupFilter(group, func(g Group) bool {
+ for _, ag := range additionalGroups {
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+ }
+ }
+
+ gidMap := make(map[int]struct{})
+ for _, ag := range additionalGroups {
+ var found bool
+ for _, g := range groups {
+ // if we found a matched group either by name or gid, take the
+ // first matched as correct
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ if _, ok := gidMap[g.Gid]; !ok {
+ gidMap[g.Gid] = struct{}{}
+ found = true
+ break
+ }
+ }
+ }
+ // we asked for a group but didn't find it. let's check to see
+ // if we wanted a numeric group
+ if !found {
+ gid, err := strconv.Atoi(ag)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find group %s", ag)
+ }
+ // Ensure gid is inside gid range.
+ if gid < minId || gid > maxId {
+ return nil, ErrRange
+ }
+ gidMap[gid] = struct{}{}
+ }
+ }
+ gids := []int{}
+ for gid := range gidMap {
+ gids = append(gids, gid)
+ }
+ return gids, nil
+}
+
+// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
+// that opens the groupPath given and gives it as an argument to
+// GetAdditionalGroups.
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
+ var group io.Reader
+
+ if groupFile, err := os.Open(groupPath); err == nil {
+ group = groupFile
+ defer groupFile.Close()
+ }
+ return GetAdditionalGroups(additionalGroups, group)
+}
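To make GetExecUser's resolution rules concrete, here is a minimal sketch against in-memory passwd/group data (the entries are invented for illustration): the user spec "www" is matched by name, uid/gid/home come from the matched passwd entry, and the supplementary group ids are derived from group membership.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\nwww:x:33:33::/srv:/bin/false\n")
	group := strings.NewReader("root:x:0:root\nwww:x:33:www\n")

	eu, err := user.GetExecUser("www", nil, passwd, group)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Prints: uid=33 gid=33 home=/srv sgids=[33]
	fmt.Printf("uid=%d gid=%d home=%s sgids=%v\n", eu.Uid, eu.Gid, eu.Home, eu.Sgids)
}
```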
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
new file mode 100644
index 000000000..2cbb6491a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package utils
+
+/*
+ * Copyright 2016, 2017 SUSE LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// MaxNameLen is the maximum length of the name of a file descriptor being
+// sent using SendFd. The name of the file handle returned by RecvFd will
+// never be larger than this value.
+const MaxNameLen = 4096
+
+// oobSpace is the size of the oob slice required to store a single FD. Note
+// that unix.UnixRights appears to make the assumption that fd is always int32,
+// so sizeof(fd) = 4.
+var oobSpace = unix.CmsgSpace(4)
+
+// RecvFd waits for a file descriptor to be sent over the given AF_UNIX
+// socket. The file name of the remote file descriptor will be recreated
+// locally (it is sent as non-auxiliary data in the same payload).
+func RecvFd(socket *os.File) (*os.File, error) {
+ // For some reason, unix.Recvmsg uses the length rather than the capacity
+ // when passing the msg_controllen and other attributes to recvmsg. So we
+ // have to actually set the length.
+ name := make([]byte, MaxNameLen)
+ oob := make([]byte, oobSpace)
+
+ sockfd := socket.Fd()
+ n, oobn, _, _, err := unix.Recvmsg(int(sockfd), name, oob, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ if n >= MaxNameLen || oobn != oobSpace {
+ return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
+ }
+
+ // Truncate.
+ name = name[:n]
+ oob = oob[:oobn]
+
+ scms, err := unix.ParseSocketControlMessage(oob)
+ if err != nil {
+ return nil, err
+ }
+ if len(scms) != 1 {
+ return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
+ }
+ scm := scms[0]
+
+ fds, err := unix.ParseUnixRights(&scm)
+ if err != nil {
+ return nil, err
+ }
+ if len(fds) != 1 {
+ return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds))
+ }
+ fd := uintptr(fds[0])
+
+ return os.NewFile(fd, string(name)), nil
+}
+
+// SendFd sends a file descriptor over the given AF_UNIX socket. In
+// addition, the file.Name() of the given file will also be sent as
+// non-auxiliary data in the same payload (allowing contextual information
+// to be sent alongside the file descriptor).
+func SendFd(socket, file *os.File) error {
+ name := []byte(file.Name())
+ if len(name) >= MaxNameLen {
+ return fmt.Errorf("sendfd: filename too long: %s", file.Name())
+ }
+ oob := unix.UnixRights(int(file.Fd()))
+
+ return unix.Sendmsg(int(socket.Fd()), name, oob, nil, 0)
+}
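A minimal round-trip sketch over a socketpair (import paths assumed; this works within a single process because the stream socket buffers the sendmsg payload until RecvFd reads it):

```go
package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/utils"
	"golang.org/x/sys/unix"
)

func main() {
	fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	parent := os.NewFile(uintptr(fds[0]), "parent")
	child := os.NewFile(uintptr(fds[1]), "child")

	f, err := os.Open(os.DevNull)
	if err != nil {
		panic(err)
	}
	// Send the open descriptor (plus its name) over one end...
	if err := utils.SendFd(child, f); err != nil {
		panic(err)
	}
	// ...and get back a new *os.File wrapping a duplicate of it.
	recvd, err := utils.RecvFd(parent)
	if err != nil {
		panic(err)
	}
	fmt.Println("received fd named", recvd.Name())
}
```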
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
new file mode 100644
index 000000000..baa54c9ba
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -0,0 +1,127 @@
+package utils
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ exitSignalOffset = 128
+)
+
+// GenerateRandomName returns a new name joined with a prefix. The specified
+// size is used to truncate the randomly generated value (capped at 64).
+func GenerateRandomName(prefix string, size int) (string, error) {
+ id := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, id); err != nil {
+ return "", err
+ }
+ if size > 64 {
+ size = 64
+ }
+ return prefix + hex.EncodeToString(id)[:size], nil
+}
+
+// ResolveRootfs converts the given rootfs path to an absolute path and
+// resolves any symlinks in it, returning the resulting path.
+func ResolveRootfs(uncleanRootfs string) (string, error) {
+ rootfs, err := filepath.Abs(uncleanRootfs)
+ if err != nil {
+ return "", err
+ }
+ return filepath.EvalSymlinks(rootfs)
+}
+
+// ExitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly
+func ExitStatus(status unix.WaitStatus) int {
+ if status.Signaled() {
+ return exitSignalOffset + int(status.Signal())
+ }
+ return status.ExitStatus()
+}
+
+// WriteJSON writes the provided struct v to w using standard json marshaling
+func WriteJSON(w io.Writer, v interface{}) error {
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+}
+
+// CleanPath makes a path safe for use with filepath.Join. This is done by not
+// only cleaning the path, but also (if the path is relative) adding a leading
+// '/' and cleaning it (then removing the leading '/'). This ensures that a
+// path resulting from prepending another path will always lexically resolve
+// to a subdirectory of the prefixed path. This is all done lexically, so
+// paths that include symlinks won't be safe as a result of using CleanPath.
+func CleanPath(path string) string {
+ // Deal with empty strings nicely.
+ if path == "" {
+ return ""
+ }
+
+ // Ensure that all paths are cleaned (especially problematic ones like
+ // "/../../../../../" which can cause lots of issues).
+ path = filepath.Clean(path)
+
+ // If the path isn't absolute, we need to do more processing to fix paths
+ // such as "../../../../<etc>/some/path". We also shouldn't convert absolute
+ // paths to relative ones.
+ if !filepath.IsAbs(path) {
+ path = filepath.Clean(string(os.PathSeparator) + path)
+ // This can't fail, as (by definition) all paths are relative to root.
+ path, _ = filepath.Rel(string(os.PathSeparator), path)
+ }
+
+ // Clean the path again for good measure.
+ return filepath.Clean(path)
+}
+
+// SearchLabels searches a list of key-value pairs for the provided key and
+// returns the corresponding value. The pairs must be separated with '='.
+func SearchLabels(labels []string, query string) string {
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == query {
+ return parts[1]
+ }
+ }
+ return ""
+}
+
+// Annotations returns the bundle path and user defined annotations from the
+// libcontainer state. We need to remove the bundle because that is a label
+// added by libcontainer.
+func Annotations(labels []string) (bundle string, userAnnotations map[string]string) {
+ userAnnotations = make(map[string]string)
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == "bundle" {
+ bundle = parts[1]
+ } else {
+ userAnnotations[parts[0]] = parts[1]
+ }
+ }
+ return
+}
+
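+// GetIntSize returns the size in bytes of the native int type.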
+func GetIntSize() int {
+ return int(unsafe.Sizeof(1))
+}
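Two of these helpers are easiest to understand from their output; a minimal sketch (import path assumed, label values invented):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/utils"
)

func main() {
	// A relative path cannot escape upward: it is re-rooted and cleaned.
	fmt.Println(utils.CleanPath("../../etc/passwd")) // etc/passwd

	bundle, ann := utils.Annotations([]string{"bundle=/run/bundle", "owner=alice"})
	fmt.Println(bundle, ann["owner"]) // /run/bundle alice
}
```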
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
new file mode 100644
index 000000000..c96088988
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -0,0 +1,44 @@
+// +build !windows
+
+package utils
+
+import (
+ "io/ioutil"
+ "os"
+ "strconv"
+
+ "golang.org/x/sys/unix"
+)
+
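+// CloseExecFrom sets the close-on-exec flag on every open file descriptor
+// numbered minFd or higher.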
+func CloseExecFrom(minFd int) error {
+ fdList, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return err
+ }
+ for _, fi := range fdList {
+ fd, err := strconv.Atoi(fi.Name())
+ if err != nil {
+ // ignore non-numeric file names
+ continue
+ }
+
+ if fd < minFd {
+ // ignore descriptors lower than our specified minimum
+ continue
+ }
+
+		// Intentionally ignore errors from unix.CloseOnExec: the cases
+		// where this might fail are basically file descriptors that
+		// have already been closed (including and especially the one
+		// that was created when ioutil.ReadDir did the "opendir"
+		// syscall).
+		unix.CloseOnExec(fd)
+ }
+ return nil
+}
+
+// NewSockPair returns a new unix socket pair
+func NewSockPair(name string) (parent *os.File, child *os.File, err error) {
+ fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return os.NewFile(uintptr(fds[1]), name+"-p"), os.NewFile(uintptr(fds[0]), name+"-c"), nil
+}
diff --git a/vendor/github.com/opencontainers/runc/vendor.conf b/vendor/github.com/opencontainers/runc/vendor.conf
new file mode 100644
index 000000000..9506b5c67
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/vendor.conf
@@ -0,0 +1,21 @@
+# OCI runtime-spec. When updating this, make sure you use a version tag rather
+# than a commit ID so it's much more obvious what version of the spec we are
+# using.
+github.com/opencontainers/runtime-spec v1.0.0
+# Core libcontainer functionality.
+github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
+github.com/opencontainers/selinux v1.0.0-rc1
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
+github.com/sirupsen/logrus a3f95b5c423586578a4e099b11a46c2479628cac
+github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
+github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270
+# systemd integration.
+github.com/coreos/go-systemd v14
+github.com/coreos/pkg v3
+github.com/godbus/dbus v3
+github.com/golang/protobuf 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8
+# Command-line interface.
+github.com/docker/docker 0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d
+github.com/docker/go-units v0.2.0
+github.com/urfave/cli d53eb991652b1d438abdd34ce4bfa3ef1539108e
+golang.org/x/sys 0e0164865330d5cf1c00247be08330bf96e2f87c https://github.com/golang/sys
diff --git a/vendor/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runtime-spec/LICENSE
new file mode 100644
index 000000000..bdc403653
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 The Linux Foundation.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/runtime-spec/README.md b/vendor/github.com/opencontainers/runtime-spec/README.md
new file mode 100644
index 000000000..2f7eb6086
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/README.md
@@ -0,0 +1,158 @@
+# Open Container Initiative Runtime Specification
+
+The [Open Container Initiative][oci] develops specifications for standards on Operating System process and application containers.
+
+The specification can be found [here](spec.md).
+
+## Table of Contents
+
+Additional documentation about how this group operates:
+
+- [Code of Conduct][code-of-conduct]
+- [Style and Conventions](style.md)
+- [Implementations](implementations.md)
+- [Releases](RELEASES.md)
+- [Project](project.md)
+- [Charter][charter]
+
+## Use Cases
+
+To provide context for users the following section gives example use cases for each part of the spec.
+
+### Application Bundle Builders
+
+Application bundle builders can create a [bundle](bundle.md) directory that includes all of the files required for launching an application as a container.
+The bundle contains an OCI [configuration file](config.md) where the builder can specify host-independent details such as [which executable to launch](config.md#process) and host-specific settings such as [mount](config.md#mounts) locations, [hook](config.md#hooks) paths, Linux [namespaces](config-linux.md#namespaces) and [cgroups](config-linux.md#control-groups).
+Because the configuration includes host-specific settings, application bundle directories copied between two hosts may require configuration adjustments.
+
+### Hook Developers
+
+[Hook](config.md#hooks) developers can extend the functionality of an OCI-compliant runtime by hooking into a container's lifecycle with an external application.
+Example use cases include sophisticated network configuration, volume garbage collection, etc.
+
+### Runtime Developers
+
+Runtime developers can build runtime implementations that run OCI-compliant bundles and container configuration, containing low-level OS and host-specific details, on a particular platform.
+
+## Contributing
+
+Development happens on GitHub for the spec.
+Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
+
+The specification and code are licensed under the Apache 2.0 license found in the [LICENSE](./LICENSE) file.
+
+### Discuss your design
+
+The project welcomes submissions, but please let everyone know what you are working on.
+
+Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
+This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
+It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.
+
+Typos and grammatical errors can go straight to a pull-request.
+When in doubt, start on the [mailing-list](#mailing-list).
+
+### Weekly Call
+
+The contributors and maintainers of all OCI projects have a weekly meeting on Wednesdays at:
+
+* 8:00 AM (USA Pacific), during [odd weeks][iso-week].
+* 2:00 PM (USA Pacific), during [even weeks][iso-week].
+
+There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics).
+
+Everyone is welcome to participate via [UberConference web][uberconference] or audio-only: +1 415 968 0849 (no PIN needed).
+An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
+Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes], with minutes from especially old meetings (September 2015 and earlier) archived [here][runtime-wiki].
+
+### Mailing List
+
+You can subscribe and join the mailing list on [Google Groups][dev-list].
+
+### IRC
+
+OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).
+
+### Git commit
+
+#### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
+The rules are pretty simple: if you can certify the below (from http://developercertificate.org):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe@gmail.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions.)
+
+You can add the sign off when creating the git commit via `git commit -s`.
+
+#### Commit Style
+
+Simple house-keeping for clean git history.
+Read more on [How to Write a Git Commit Message][how-to-git-commit] or the Discussion section of [git-commit(1)][git-commit.1].
+
+1. Separate the subject from body with a blank line
+2. Limit the subject line to 50 characters
+3. Capitalize the subject line
+4. Do not end the subject line with a period
+5. Use the imperative mood in the subject line
+6. Wrap the body at 72 characters
+7. Use the body to explain what and why vs. how
+ * If there was important/useful/essential conversation or information, copy or include a reference
+8. When possible, use one keyword to scope the change in the subject (e.g. "README: ...", "runtime: ...")
+
+
+[charter]: https://www.opencontainers.org/about/governance
+[code-of-conduct]: https://github.com/opencontainers/tob/blob/master/code-of-conduct.md
+[dev-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev
+[how-to-git-commit]: http://chris.beams.io/posts/git-commit
+[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
+[iso-week]: https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_of_a_given_date
+[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/
+[oci]: https://www.opencontainers.org
+[rfc5545]: https://tools.ietf.org/html/rfc5545
+[runtime-wiki]: https://github.com/opencontainers/runtime-spec/wiki
+[uberconference]: https://www.uberconference.com/opencontainers
+
+[git-commit.1]: http://git-scm.com/docs/git-commit
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
new file mode 100644
index 000000000..f3f37d42d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
@@ -0,0 +1,570 @@
+package specs
+
+import "os"
+
+// Spec is the base configuration for the container.
+type Spec struct {
+ // Version of the Open Container Runtime Specification with which the bundle complies.
+ Version string `json:"ociVersion"`
+ // Process configures the container process.
+ Process *Process `json:"process,omitempty"`
+ // Root configures the container's root filesystem.
+ Root *Root `json:"root,omitempty"`
+ // Hostname configures the container's hostname.
+ Hostname string `json:"hostname,omitempty"`
+ // Mounts configures additional mounts (on top of Root).
+ Mounts []Mount `json:"mounts,omitempty"`
+ // Hooks configures callbacks for container lifecycle events.
+ Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"`
+ // Annotations contains arbitrary metadata for the container.
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // Linux is platform-specific configuration for Linux based containers.
+ Linux *Linux `json:"linux,omitempty" platform:"linux"`
+ // Solaris is platform-specific configuration for Solaris based containers.
+ Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"`
+ // Windows is platform-specific configuration for Windows based containers.
+ Windows *Windows `json:"windows,omitempty" platform:"windows"`
+}
+
+// Process contains information to start a specific application inside the container.
+type Process struct {
+ // Terminal creates an interactive terminal for the container.
+ Terminal bool `json:"terminal,omitempty"`
+ // ConsoleSize specifies the size of the console.
+ ConsoleSize *Box `json:"consoleSize,omitempty"`
+ // User specifies user information for the process.
+ User User `json:"user"`
+ // Args specifies the binary and arguments for the application to execute.
+ Args []string `json:"args"`
+ // Env populates the process environment for the process.
+ Env []string `json:"env,omitempty"`
+ // Cwd is the current working directory for the process and must be
+ // relative to the container's root.
+ Cwd string `json:"cwd"`
+ // Capabilities are Linux capabilities that are kept for the process.
+ Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"`
+ // Rlimits specifies rlimit options to apply to the process.
+ Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"`
+ // NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
+ NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"`
+ // ApparmorProfile specifies the apparmor profile for the container.
+ ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"`
+ // Specify an oom_score_adj for the container.
+ OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"`
+ // SelinuxLabel specifies the selinux context that the container process is run as.
+ SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
+}
+
+// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process.
+// http://man7.org/linux/man-pages/man7/capabilities.7.html
+type LinuxCapabilities struct {
+	// Bounding is the bounding set, limiting the capabilities the process
+	// can ever gain.
+	Bounding []string `json:"bounding,omitempty" platform:"linux"`
+	// Effective is the set of capabilities checked by the kernel when
+	// performing permission checks.
+	Effective []string `json:"effective,omitempty" platform:"linux"`
+ // Inheritable is the capabilities preserved across execve.
+ Inheritable []string `json:"inheritable,omitempty" platform:"linux"`
+ // Permitted is the limiting superset for effective capabilities.
+ Permitted []string `json:"permitted,omitempty" platform:"linux"`
+ // Ambient is the ambient set of capabilities that are kept.
+ Ambient []string `json:"ambient,omitempty" platform:"linux"`
+}
+
+// Box specifies dimensions of a rectangle. Used for specifying the size of a console.
+type Box struct {
+ // Height is the vertical dimension of a box.
+ Height uint `json:"height"`
+ // Width is the horizontal dimension of a box.
+ Width uint `json:"width"`
+}
+
+// User specifies specific user (and group) information for the container process.
+type User struct {
+ // UID is the user id.
+ UID uint32 `json:"uid" platform:"linux,solaris"`
+ // GID is the group id.
+ GID uint32 `json:"gid" platform:"linux,solaris"`
+ // AdditionalGids are additional group ids set for the container's process.
+ AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
+ // Username is the user name.
+ Username string `json:"username,omitempty" platform:"windows"`
+}
+
+// Root contains information about the container's root filesystem on the host.
+type Root struct {
+ // Path is the absolute path to the container's root filesystem.
+ Path string `json:"path"`
+ // Readonly makes the root filesystem for the container readonly before the process is executed.
+ Readonly bool `json:"readonly,omitempty"`
+}
+
+// Mount specifies a mount for a container.
+type Mount struct {
+ // Destination is the absolute path where the mount will be placed in the container.
+ Destination string `json:"destination"`
+ // Type specifies the mount kind.
+ Type string `json:"type,omitempty" platform:"linux,solaris"`
+ // Source specifies the source path of the mount.
+ Source string `json:"source,omitempty"`
+ // Options are fstab style mount options.
+ Options []string `json:"options,omitempty"`
+}
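+
+// Illustrative sketch (not part of the upstream file): a read-only bind
+// mount expressed with this Mount type; "/host/config" is a hypothetical
+// source path.
+//
+//    m := Mount{
+//        Destination: "/etc/app",
+//        Type:        "bind",
+//        Source:      "/host/config",
+//        Options:     []string{"bind", "ro"},
+//    }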
+
+// Hook specifies a command that is run at a particular event in the lifecycle of a container.
+type Hook struct {
+ Path string `json:"path"`
+ Args []string `json:"args,omitempty"`
+ Env []string `json:"env,omitempty"`
+ Timeout *int `json:"timeout,omitempty"`
+}
+
+// Hooks for container setup and teardown
+type Hooks struct {
+ // Prestart is a list of hooks to be run before the container process is executed.
+ Prestart []Hook `json:"prestart,omitempty"`
+ // Poststart is a list of hooks to be run after the container process is started.
+ Poststart []Hook `json:"poststart,omitempty"`
+ // Poststop is a list of hooks to be run after the container process exits.
+ Poststop []Hook `json:"poststop,omitempty"`
+}
+
+// Linux contains platform-specific configuration for Linux based containers.
+type Linux struct {
+ // UIDMappings specifies user mappings for supporting user namespaces.
+ UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty"`
+ // GIDMappings specifies group mappings for supporting user namespaces.
+ GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty"`
+ // Sysctl is a set of key-value pairs that are set for the container on start.
+ Sysctl map[string]string `json:"sysctl,omitempty"`
+ // Resources contain cgroup information for handling resource constraints
+ // for the container
+ Resources *LinuxResources `json:"resources,omitempty"`
+ // CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
+ // The path is expected to be relative to the cgroups mountpoint.
+ // If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
+ CgroupsPath string `json:"cgroupsPath,omitempty"`
+ // Namespaces contains the namespaces that are created and/or joined by the container
+ Namespaces []LinuxNamespace `json:"namespaces,omitempty"`
+ // Devices are a list of device nodes that are created for the container
+ Devices []LinuxDevice `json:"devices,omitempty"`
+ // Seccomp specifies the seccomp security settings for the container.
+ Seccomp *LinuxSeccomp `json:"seccomp,omitempty"`
+ // RootfsPropagation is the rootfs mount propagation mode for the container.
+ RootfsPropagation string `json:"rootfsPropagation,omitempty"`
+ // MaskedPaths masks over the provided paths inside the container.
+ MaskedPaths []string `json:"maskedPaths,omitempty"`
+ // ReadonlyPaths sets the provided paths as RO inside the container.
+ ReadonlyPaths []string `json:"readonlyPaths,omitempty"`
+ // MountLabel specifies the selinux context for the mounts in the container.
+ MountLabel string `json:"mountLabel,omitempty"`
+ // IntelRdt contains Intel Resource Director Technology (RDT) information
+ // for handling resource constraints (e.g., L3 cache) for the container
+ IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
+}
+
+// LinuxNamespace is the configuration for a Linux namespace
+type LinuxNamespace struct {
+ // Type is the type of namespace
+ Type LinuxNamespaceType `json:"type"`
+ // Path is a path to an existing namespace persisted on disk that can be joined
+ // and is of the same type
+ Path string `json:"path,omitempty"`
+}
+
+// LinuxNamespaceType identifies one of the Linux namespaces.
+type LinuxNamespaceType string
+
+const (
+ // PIDNamespace for isolating process IDs
+ PIDNamespace LinuxNamespaceType = "pid"
+ // NetworkNamespace for isolating network devices, stacks, ports, etc
+ NetworkNamespace LinuxNamespaceType = "network"
+ // MountNamespace for isolating mount points
+ MountNamespace LinuxNamespaceType = "mount"
+ // IPCNamespace for isolating System V IPC and POSIX message queues
+ IPCNamespace LinuxNamespaceType = "ipc"
+ // UTSNamespace for isolating hostname and NIS domain name
+ UTSNamespace LinuxNamespaceType = "uts"
+ // UserNamespace for isolating user and group IDs
+ UserNamespace LinuxNamespaceType = "user"
+ // CgroupNamespace for isolating cgroup hierarchies
+ CgroupNamespace LinuxNamespaceType = "cgroup"
+)
+
+// LinuxIDMapping specifies UID/GID mappings
+type LinuxIDMapping struct {
+ // HostID is the starting UID/GID on the host to be mapped to 'ContainerID'
+ HostID uint32 `json:"hostID"`
+ // ContainerID is the starting UID/GID in the container
+ ContainerID uint32 `json:"containerID"`
+ // Size is the number of IDs to be mapped
+ Size uint32 `json:"size"`
+}
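+
+// Illustrative sketch (not part of the upstream file): mapping container
+// root (UID 0) onto a hypothetical unprivileged host range starting at
+// 100000:
+//
+//    m := LinuxIDMapping{HostID: 100000, ContainerID: 0, Size: 65536}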
+
+// POSIXRlimit type and restrictions
+type POSIXRlimit struct {
+ // Type of the rlimit to set
+ Type string `json:"type"`
+ // Hard is the hard limit for the specified type
+ Hard uint64 `json:"hard"`
+ // Soft is the soft limit for the specified type
+ Soft uint64 `json:"soft"`
+}
+
+// LinuxHugepageLimit corresponds to the hugetlb cgroup controller, which limits kernel hugepage usage.
+type LinuxHugepageLimit struct {
+ // Pagesize is the hugepage size
+ Pagesize string `json:"pageSize"`
+ // Limit is the limit of "hugepagesize" hugetlb usage
+ Limit uint64 `json:"limit"`
+}
+
+// LinuxInterfacePriority for network interfaces
+type LinuxInterfacePriority struct {
+ // Name is the name of the network interface
+ Name string `json:"name"`
+ // Priority for the interface
+ Priority uint32 `json:"priority"`
+}
+
+// linuxBlockIODevice holds the major:minor pair that identifies a device to the blkio cgroup.
+type linuxBlockIODevice struct {
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+}
+
+// LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice
+type LinuxWeightDevice struct {
+ linuxBlockIODevice
+ // Weight is the bandwidth rate for the device.
+ Weight *uint16 `json:"weight,omitempty"`
+ // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only
+ LeafWeight *uint16 `json:"leafWeight,omitempty"`
+}
+
+// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair
+type LinuxThrottleDevice struct {
+ linuxBlockIODevice
+ // Rate is the IO rate limit per cgroup per device
+ Rate uint64 `json:"rate"`
+}
+
+// LinuxBlockIO for Linux cgroup 'blkio' resource management
+type LinuxBlockIO struct {
+ // Specifies per cgroup weight
+ Weight *uint16 `json:"weight,omitempty"`
+ // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only
+ LeafWeight *uint16 `json:"leafWeight,omitempty"`
+ // Weight per cgroup per device, can override BlkioWeight
+ WeightDevice []LinuxWeightDevice `json:"weightDevice,omitempty"`
+ // IO read rate limit per cgroup per device, bytes per second
+ ThrottleReadBpsDevice []LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"`
+ // IO write rate limit per cgroup per device, bytes per second
+ ThrottleWriteBpsDevice []LinuxThrottleDevice `json:"throttleWriteBpsDevice,omitempty"`
+ // IO read rate limit per cgroup per device, IO per second
+ ThrottleReadIOPSDevice []LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"`
+ // IO write rate limit per cgroup per device, IO per second
+ ThrottleWriteIOPSDevice []LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"`
+}
+
+// LinuxMemory for Linux cgroup 'memory' resource management
+type LinuxMemory struct {
+ // Memory limit (in bytes).
+ Limit *int64 `json:"limit,omitempty"`
+ // Memory reservation or soft_limit (in bytes).
+ Reservation *int64 `json:"reservation,omitempty"`
+ // Total memory limit (memory + swap).
+ Swap *int64 `json:"swap,omitempty"`
+ // Kernel memory limit (in bytes).
+ Kernel *int64 `json:"kernel,omitempty"`
+ // Kernel memory limit for tcp (in bytes)
+ KernelTCP *int64 `json:"kernelTCP,omitempty"`
+ // How aggressively the kernel will swap memory pages.
+ Swappiness *uint64 `json:"swappiness,omitempty"`
+ // DisableOOMKiller disables the OOM killer for out of memory conditions
+ DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"`
+}
+
+// LinuxCPU for Linux cgroup 'cpu' resource management
+type LinuxCPU struct {
+ // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares).
+ Shares *uint64 `json:"shares,omitempty"`
+ // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+ Quota *int64 `json:"quota,omitempty"`
+ // CPU period to be used for hardcapping (in usecs).
+ Period *uint64 `json:"period,omitempty"`
+ // How much time realtime scheduling may use (in usecs).
+ RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"`
+ // CPU period to be used for realtime scheduling (in usecs).
+ RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"`
+ // CPUs to use within the cpuset. Default is to use any CPU available.
+ Cpus string `json:"cpus,omitempty"`
+ // List of memory nodes in the cpuset. Default is to use any available memory node.
+ Mems string `json:"mems,omitempty"`
+}
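+
+// Illustrative sketch (not part of the upstream file): capping a container
+// at half a CPU by granting 50ms of quota per 100ms period:
+//
+//    quota, period := int64(50000), uint64(100000)
+//    cpu := LinuxCPU{Quota: &quota, Period: &period}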
+
+// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3)
+type LinuxPids struct {
+ // Maximum number of PIDs. Default is "no limit".
+ Limit int64 `json:"limit"`
+}
+
+// LinuxNetwork identification and priority configuration
+type LinuxNetwork struct {
+ // Set class identifier for container's network packets
+ ClassID *uint32 `json:"classID,omitempty"`
+ // Set priority of network traffic for container
+ Priorities []LinuxInterfacePriority `json:"priorities,omitempty"`
+}
+
+// LinuxResources has container runtime resource constraints
+type LinuxResources struct {
+ // Devices configures the device whitelist.
+ Devices []LinuxDeviceCgroup `json:"devices,omitempty"`
+ // Memory restriction configuration
+ Memory *LinuxMemory `json:"memory,omitempty"`
+ // CPU resource restriction configuration
+ CPU *LinuxCPU `json:"cpu,omitempty"`
+ // Task resource restriction configuration.
+ Pids *LinuxPids `json:"pids,omitempty"`
+ // BlockIO restriction configuration
+ BlockIO *LinuxBlockIO `json:"blockIO,omitempty"`
+ // Hugetlb limit (in bytes)
+ HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"`
+ // Network restriction configuration
+ Network *LinuxNetwork `json:"network,omitempty"`
+}
+
+// LinuxDevice represents the mknod information for a Linux special device file
+type LinuxDevice struct {
+ // Path to the device.
+ Path string `json:"path"`
+ // Device type, block, char, etc.
+ Type string `json:"type"`
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+ // FileMode permission bits for the device.
+ FileMode *os.FileMode `json:"fileMode,omitempty"`
+ // UID of the device.
+ UID *uint32 `json:"uid,omitempty"`
+ // Gid of the device.
+ GID *uint32 `json:"gid,omitempty"`
+}
+
+// LinuxDeviceCgroup represents a device rule for the whitelist controller
+type LinuxDeviceCgroup struct {
+ // Allow or deny
+ Allow bool `json:"allow"`
+ // Device type, block, char, etc.
+ Type string `json:"type,omitempty"`
+ // Major is the device's major number.
+ Major *int64 `json:"major,omitempty"`
+ // Minor is the device's minor number.
+ Minor *int64 `json:"minor,omitempty"`
+ // Cgroup access permissions format, rwm.
+ Access string `json:"access,omitempty"`
+}
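+
+// Illustrative sketch (not part of the upstream file): a whitelist rule
+// permitting read/write/mknod access to /dev/null (char device 1:3):
+//
+//    major, minor := int64(1), int64(3)
+//    rule := LinuxDeviceCgroup{
+//        Allow:  true,
+//        Type:   "c",
+//        Major:  &major,
+//        Minor:  &minor,
+//        Access: "rwm",
+//    }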
+
+// Solaris contains platform-specific configuration for Solaris application containers.
+type Solaris struct {
+ // SMF FMRI which should go "online" before we start the container process.
+ Milestone string `json:"milestone,omitempty"`
+ // Maximum set of privileges any process in this container can obtain.
+ LimitPriv string `json:"limitpriv,omitempty"`
+ // The maximum amount of shared memory allowed for this container.
+ MaxShmMemory string `json:"maxShmMemory,omitempty"`
+ // Specification for automatic creation of network resources for this container.
+ Anet []SolarisAnet `json:"anet,omitempty"`
+ // Set a limit on the amount of CPU time that can be used by the container.
+ CappedCPU *SolarisCappedCPU `json:"cappedCPU,omitempty"`
+ // The physical and swap caps on the memory that can be used by this container.
+ CappedMemory *SolarisCappedMemory `json:"cappedMemory,omitempty"`
+}
+
+// SolarisCappedCPU allows users to set a limit on the amount of CPU time that can be used by the container.
+type SolarisCappedCPU struct {
+ Ncpus string `json:"ncpus,omitempty"`
+}
+
+// SolarisCappedMemory allows users to set the physical and swap caps on the memory that can be used by this container.
+type SolarisCappedMemory struct {
+ Physical string `json:"physical,omitempty"`
+ Swap string `json:"swap,omitempty"`
+}
+
+// SolarisAnet provides the specification for automatic creation of network resources for this container.
+type SolarisAnet struct {
+ // Specify a name for the automatically created VNIC datalink.
+ Linkname string `json:"linkname,omitempty"`
+ // Specify the link over which the VNIC will be created.
+ Lowerlink string `json:"lowerLink,omitempty"`
+ // The set of IP addresses that the container can use.
+ Allowedaddr string `json:"allowedAddress,omitempty"`
+ // Specifies whether allowedAddress limitation is to be applied to the VNIC.
+ Configallowedaddr string `json:"configureAllowedAddress,omitempty"`
+ // The value of the optional default router.
+ Defrouter string `json:"defrouter,omitempty"`
+ // Enable one or more types of link protection.
+ Linkprotection string `json:"linkProtection,omitempty"`
+ // Set the VNIC's macAddress
+ Macaddress string `json:"macAddress,omitempty"`
+}
+
+// Windows defines the runtime configuration for Windows based containers, including Hyper-V containers.
+type Windows struct {
+ // LayerFolders contains a list of absolute paths to directories containing image layers.
+ LayerFolders []string `json:"layerFolders"`
+ // Resources contains information for handling resource constraints for the container.
+ Resources *WindowsResources `json:"resources,omitempty"`
+ // CredentialSpec contains a JSON object describing a group Managed Service Account (gMSA) specification.
+ CredentialSpec interface{} `json:"credentialSpec,omitempty"`
+ // Servicing indicates if the container is being started in a mode to apply a Windows Update servicing operation.
+ Servicing bool `json:"servicing,omitempty"`
+ // IgnoreFlushesDuringBoot indicates if the container is being started in a mode where disk writes are not flushed during its boot process.
+ IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"`
+ // HyperV contains information for running a container with Hyper-V isolation.
+ HyperV *WindowsHyperV `json:"hyperv,omitempty"`
+ // Network restriction configuration.
+ Network *WindowsNetwork `json:"network,omitempty"`
+}
+
+// WindowsResources has container runtime resource constraints for containers running on Windows.
+type WindowsResources struct {
+ // Memory restriction configuration.
+ Memory *WindowsMemoryResources `json:"memory,omitempty"`
+ // CPU resource restriction configuration.
+ CPU *WindowsCPUResources `json:"cpu,omitempty"`
+ // Storage restriction configuration.
+ Storage *WindowsStorageResources `json:"storage,omitempty"`
+}
+
+// WindowsMemoryResources contains memory resource management settings.
+type WindowsMemoryResources struct {
+ // Memory limit in bytes.
+ Limit *uint64 `json:"limit,omitempty"`
+}
+
+// WindowsCPUResources contains CPU resource management settings.
+type WindowsCPUResources struct {
+ // Number of CPUs available to the container.
+ Count *uint64 `json:"count,omitempty"`
+ // CPU shares (relative weight to other containers with cpu shares).
+ Shares *uint16 `json:"shares,omitempty"`
+ // Specifies the portion of processor cycles that this container can use as a percentage times 100.
+ Maximum *uint16 `json:"maximum,omitempty"`
+}
+
+// WindowsStorageResources contains storage resource management settings.
+type WindowsStorageResources struct {
+ // Specifies maximum Iops for the system drive.
+ Iops *uint64 `json:"iops,omitempty"`
+ // Specifies maximum bytes per second for the system drive.
+ Bps *uint64 `json:"bps,omitempty"`
+ // Sandbox size specifies the minimum size of the system drive in bytes.
+ SandboxSize *uint64 `json:"sandboxSize,omitempty"`
+}
+
+// WindowsNetwork contains network settings for Windows containers.
+type WindowsNetwork struct {
+ // List of HNS endpoints that the container should connect to.
+ EndpointList []string `json:"endpointList,omitempty"`
+ // Specifies if unqualified DNS name resolution is allowed.
+ AllowUnqualifiedDNSQuery bool `json:"allowUnqualifiedDNSQuery,omitempty"`
+ // List of DNS suffixes to use for name resolution.
+ DNSSearchList []string `json:"DNSSearchList,omitempty"`
+ // Name (ID) of the container that we will share with the network stack.
+ NetworkSharedContainerName string `json:"networkSharedContainerName,omitempty"`
+}
+
+// WindowsHyperV contains information for configuring a container to run with Hyper-V isolation.
+type WindowsHyperV struct {
+ // UtilityVMPath is an optional path to the image used for the Utility VM.
+ UtilityVMPath string `json:"utilityVMPath,omitempty"`
+}
+
+// LinuxSeccomp represents syscall restrictions
+type LinuxSeccomp struct {
+ DefaultAction LinuxSeccompAction `json:"defaultAction"`
+ Architectures []Arch `json:"architectures,omitempty"`
+ Syscalls []LinuxSyscall `json:"syscalls,omitempty"`
+}
+
+// Arch used for additional architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+ ArchPARISC Arch = "SCMP_ARCH_PARISC"
+ ArchPARISC64 Arch = "SCMP_ARCH_PARISC64"
+)
+
+// LinuxSeccompAction taken upon Seccomp rule match
+type LinuxSeccompAction string
+
+// Define actions for Seccomp rules
+const (
+ ActKill LinuxSeccompAction = "SCMP_ACT_KILL"
+ ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP"
+ ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
+ ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
+ ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
+)
+
+// LinuxSeccompOperator used to match syscall arguments in Seccomp
+type LinuxSeccompOperator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual LinuxSeccompOperator = "SCMP_CMP_NE"
+ OpLessThan LinuxSeccompOperator = "SCMP_CMP_LT"
+ OpLessEqual LinuxSeccompOperator = "SCMP_CMP_LE"
+ OpEqualTo LinuxSeccompOperator = "SCMP_CMP_EQ"
+ OpGreaterEqual LinuxSeccompOperator = "SCMP_CMP_GE"
+ OpGreaterThan LinuxSeccompOperator = "SCMP_CMP_GT"
+ OpMaskedEqual LinuxSeccompOperator = "SCMP_CMP_MASKED_EQ"
+)
+
+// LinuxSeccompArg used for matching specific syscall arguments in Seccomp
+type LinuxSeccompArg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo,omitempty"`
+ Op LinuxSeccompOperator `json:"op"`
+}
+
+// LinuxSyscall is used to match a syscall in Seccomp
+type LinuxSyscall struct {
+ Names []string `json:"names"`
+ Action LinuxSeccompAction `json:"action"`
+ Args []LinuxSeccompArg `json:"args,omitempty"`
+}
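+
+// Illustrative sketch (not part of the upstream file): a profile that
+// allows all syscalls by default but makes chmod fail with an errno:
+//
+//    profile := LinuxSeccomp{
+//        DefaultAction: ActAllow,
+//        Syscalls: []LinuxSyscall{
+//            {Names: []string{"chmod"}, Action: ActErrno},
+//        },
+//    }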
+
+// LinuxIntelRdt has container runtime resource constraints
+// for Intel RDT/CAT, which was introduced in the Linux 4.10 kernel.
+type LinuxIntelRdt struct {
+ // The schema for L3 cache id and capacity bitmask (CBM)
+ // Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
+ L3CacheSchema string `json:"l3CacheSchema,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
new file mode 100644
index 000000000..89dce34be
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
@@ -0,0 +1,17 @@
+package specs
+
+// State holds information about the runtime state of the container.
+type State struct {
+ // Version is the version of the specification that is supported.
+ Version string `json:"ociVersion"`
+ // ID is the container ID
+ ID string `json:"id"`
+ // Status is the runtime status of the container.
+ Status string `json:"status"`
+ // Pid is the process ID for the container process.
+ Pid int `json:"pid,omitempty"`
+ // Bundle is the path to the container's bundle directory.
+ Bundle string `json:"bundle"`
+ // Annotations are key values associated with the container.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
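+
+// An illustrative state document as a runtime might report it (all values
+// below are hypothetical):
+//
+//    {
+//      "ociVersion": "1.0.0",
+//      "id": "example-container",
+//      "status": "running",
+//      "pid": 4422,
+//      "bundle": "/run/bundles/example"
+//    }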
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
new file mode 100644
index 000000000..926ce6650
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -0,0 +1,18 @@
+package specs
+
+import "fmt"
+
+const (
+ // VersionMajor is for API-incompatible changes.
+ VersionMajor = 1
+ // VersionMinor is for new functionality added in a backwards-compatible manner.
+ VersionMinor = 0
+ // VersionPatch is for backwards-compatible bug fixes.
+ VersionPatch = 0
+
+ // VersionDev indicates a development branch; it is the empty string for releases.
+ VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/runtime-tools/LICENSE b/vendor/github.com/opencontainers/runtime-tools/LICENSE
new file mode 100644
index 000000000..bdc403653
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 The Linux Foundation.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/runtime-tools/README.md b/vendor/github.com/opencontainers/runtime-tools/README.md
new file mode 100644
index 000000000..bbcafc26e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/README.md
@@ -0,0 +1,84 @@
+# oci-runtime-tool [![Build Status](https://travis-ci.org/opencontainers/runtime-tools.svg?branch=master)](https://travis-ci.org/opencontainers/runtime-tools) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runtime-tools)](https://goreportcard.com/report/github.com/opencontainers/runtime-tools)
+
+oci-runtime-tool is a collection of tools for working with the [OCI runtime specification][runtime-spec].
+To build from source code, runtime-tools requires Go 1.7.x or above.
+
+## Generating OCI runtime spec configuration files
+
+[`oci-runtime-tool generate`][generate.1] generates [configuration JSON][config.json] for an [OCI bundle][bundle].
+[OCI-compatible runtimes][runtime-spec] like [runC][] expect to read the configuration from `config.json`.
+
+```sh
+$ oci-runtime-tool generate --output config.json
+$ cat config.json
+{
+ "ociVersion": "0.5.0",
+ …
+}
+```
+
+## Validating an OCI bundle
+
+[`oci-runtime-tool validate`][validate.1] validates an OCI bundle.
+An error message is printed if the OCI bundle fails validation.
+
+```sh
+$ oci-runtime-tool generate
+$ oci-runtime-tool validate
+INFO[0000] Bundle validation succeeded.
+```
+
+## Testing OCI runtimes
+
+```sh
+$ sudo make RUNTIME=runc localvalidation
+RUNTIME=runc go test -tags "" -v github.com/opencontainers/runtime-tools/validation
+=== RUN TestValidateBasic
+TAP version 13
+ok 1 - root filesystem
+ok 2 - hostname
+ok 3 - mounts
+ok 4 - capabilities
+ok 5 - default symlinks
+ok 6 - default devices
+ok 7 - linux devices
+ok 8 - linux process
+ok 9 - masked paths
+ok 10 - oom score adj
+ok 11 - read only paths
+ok 12 - rlimits
+ok 13 - sysctls
+ok 14 - uid mappings
+ok 15 - gid mappings
+1..15
+--- PASS: TestValidateBasic (0.08s)
+=== RUN TestValidateSysctls
+TAP version 13
+ok 1 - root filesystem
+ok 2 - hostname
+ok 3 - mounts
+ok 4 - capabilities
+ok 5 - default symlinks
+ok 6 - default devices
+ok 7 - linux devices
+ok 8 - linux process
+ok 9 - masked paths
+ok 10 - oom score adj
+ok 11 - read only paths
+ok 12 - rlimits
+ok 13 - sysctls
+ok 14 - uid mappings
+ok 15 - gid mappings
+1..15
+--- PASS: TestValidateSysctls (0.20s)
+PASS
+ok github.com/opencontainers/runtime-tools/validation 0.281s
+```
+
+[bundle]: https://github.com/opencontainers/runtime-spec/blob/master/bundle.md
+[config.json]: https://github.com/opencontainers/runtime-spec/blob/master/config.md
+[runC]: https://github.com/opencontainers/runc
+[runtime-spec]: https://github.com/opencontainers/runtime-spec
+
+[generate.1]: man/oci-runtime-tool-generate.1.md
+[validate.1]: man/oci-runtime-tool-validate.1.md
diff --git a/vendor/github.com/opencontainers/runtime-tools/error/error.go b/vendor/github.com/opencontainers/runtime-tools/error/error.go
new file mode 100644
index 000000000..f5a90800e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/error/error.go
@@ -0,0 +1,92 @@
+// Package error implements generic tooling for tracking RFC 2119
+// violations and linking back to the appropriate specification section.
+package error
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Level represents the RFC 2119 compliance levels
+type Level int
+
+const (
+ // MAY-level
+
+ // May represents 'MAY' in RFC 2119.
+ May Level = iota
+ // Optional represents 'OPTIONAL' in RFC 2119.
+ Optional
+
+ // SHOULD-level
+
+ // Should represents 'SHOULD' in RFC 2119.
+ Should
+ // ShouldNot represents 'SHOULD NOT' in RFC 2119.
+ ShouldNot
+ // Recommended represents 'RECOMMENDED' in RFC 2119.
+ Recommended
+ // NotRecommended represents 'NOT RECOMMENDED' in RFC 2119.
+ NotRecommended
+
+ // MUST-level
+
+ // Must represents 'MUST' in RFC 2119.
+ Must
+ // MustNot represents 'MUST NOT' in RFC 2119.
+ MustNot
+ // Shall represents 'SHALL' in RFC 2119.
+ Shall
+ // ShallNot represents 'SHALL NOT' in RFC 2119.
+ ShallNot
+ // Required represents 'REQUIRED' in RFC 2119.
+ Required
+)
+
+// Error represents an error with compliance level and specification reference.
+type Error struct {
+ // Level represents the RFC 2119 compliance level.
+ Level Level
+
+ // Reference is a URL for the violated specification requirement.
+ Reference string
+
+ // Err holds additional details about the violation.
+ Err error
+}
+
+// ParseLevel takes a string level and returns the RFC 2119 compliance level constant.
+func ParseLevel(level string) (Level, error) {
+ switch strings.ToUpper(level) {
+ case "MAY":
+ fallthrough
+ case "OPTIONAL":
+ return May, nil
+ case "SHOULD":
+ fallthrough
+ case "SHOULDNOT":
+ fallthrough
+ case "RECOMMENDED":
+ fallthrough
+ case "NOTRECOMMENDED":
+ return Should, nil
+ case "MUST":
+ fallthrough
+ case "MUSTNOT":
+ fallthrough
+ case "SHALL":
+ fallthrough
+ case "SHALLNOT":
+ fallthrough
+ case "REQUIRED":
+ return Must, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("%q is not a valid compliance level", level)
+}
+
+// Error returns the error message with specification reference.
+func (err *Error) Error() string {
+ return fmt.Sprintf("%s\nRefer to: %s", err.Err.Error(), err.Reference)
+}
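+
+// Illustrative sketch (not part of the upstream file): wrapping a violation
+// so callers can filter by compliance level; the URL is a placeholder.
+//
+//    e := &Error{
+//        Level:     Must,
+//        Reference: "https://example.com/spec#process",
+//        Err:       fmt.Errorf("process.cwd must be an absolute path"),
+//    }
+//    fmt.Println(e.Error())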
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
new file mode 100644
index 000000000..fce88f5e2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
@@ -0,0 +1,1256 @@
+// Package generate implements functions generating container config files.
+package generate
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate/seccomp"
+ "github.com/opencontainers/runtime-tools/validate"
+ "github.com/syndtr/gocapability/capability"
+)
+
+var (
+ // Namespaces lists the names of supported namespaces.
+ Namespaces = []string{"network", "pid", "mount", "ipc", "uts", "user", "cgroup"}
+)
+
+// Generator represents a generator for a container spec.
+type Generator struct {
+ spec *rspec.Spec
+ HostSpecific bool
+}
+
+// ExportOptions has toggles for exporting only certain parts of the specification.
+type ExportOptions struct {
+ Seccomp bool // seccomp toggles if only seccomp should be exported
+}
+
+// New creates a spec Generator with the default spec.
+func New() Generator {
+ spec := rspec.Spec{
+ Version: rspec.Version,
+ Root: &rspec.Root{
+ Path: "",
+ Readonly: false,
+ },
+ Process: &rspec.Process{
+ Terminal: false,
+ User: rspec.User{},
+ Args: []string{
+ "sh",
+ },
+ Env: []string{
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "TERM=xterm",
+ },
+ Cwd: "/",
+ Capabilities: &rspec.LinuxCapabilities{
+ Bounding: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Permitted: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Inheritable: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Effective: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Ambient: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ },
+ Rlimits: []rspec.POSIXRlimit{
+ {
+ Type: "RLIMIT_NOFILE",
+ Hard: uint64(1024),
+ Soft: uint64(1024),
+ },
+ },
+ },
+ Hostname: "mrsdalloway",
+ Mounts: []rspec.Mount{
+ {
+ Destination: "/proc",
+ Type: "proc",
+ Source: "proc",
+ Options: nil,
+ },
+ {
+ Destination: "/dev",
+ Type: "tmpfs",
+ Source: "tmpfs",
+ Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+ },
+ {
+ Destination: "/dev/pts",
+ Type: "devpts",
+ Source: "devpts",
+ Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+ },
+ {
+ Destination: "/dev/shm",
+ Type: "tmpfs",
+ Source: "shm",
+ Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
+ },
+ {
+ Destination: "/dev/mqueue",
+ Type: "mqueue",
+ Source: "mqueue",
+ Options: []string{"nosuid", "noexec", "nodev"},
+ },
+ {
+ Destination: "/sys",
+ Type: "sysfs",
+ Source: "sysfs",
+ Options: []string{"nosuid", "noexec", "nodev", "ro"},
+ },
+ },
+ Linux: &rspec.Linux{
+ Resources: &rspec.LinuxResources{
+ Devices: []rspec.LinuxDeviceCgroup{
+ {
+ Allow: false,
+ Access: "rwm",
+ },
+ },
+ },
+ Namespaces: []rspec.LinuxNamespace{
+ {
+ Type: "pid",
+ },
+ {
+ Type: "network",
+ },
+ {
+ Type: "ipc",
+ },
+ {
+ Type: "uts",
+ },
+ {
+ Type: "mount",
+ },
+ },
+ Devices: []rspec.LinuxDevice{},
+ },
+ }
+ spec.Linux.Seccomp = seccomp.DefaultProfile(&spec)
+ return Generator{
+ spec: &spec,
+ }
+}
+
+// NewFromSpec creates a spec Generator from a given spec.
+func NewFromSpec(spec *rspec.Spec) Generator {
+ return Generator{
+ spec: spec,
+ }
+}
+
+// NewFromFile loads the template specified in a file into a spec Generator.
+func NewFromFile(path string) (Generator, error) {
+ cf, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Generator{}, fmt.Errorf("template configuration at %s not found", path)
+ }
+ // Propagate other open errors instead of silently parsing a nil file.
+ return Generator{}, err
+ }
+ defer cf.Close()
+
+ return NewFromTemplate(cf)
+}
+
+// NewFromTemplate loads the template from io.Reader into a spec Generator.
+func NewFromTemplate(r io.Reader) (Generator, error) {
+ var spec rspec.Spec
+ if err := json.NewDecoder(r).Decode(&spec); err != nil {
+ return Generator{}, err
+ }
+ return Generator{
+ spec: &spec,
+ }, nil
+}
+
+// SetSpec sets the spec in the Generator g.
+func (g *Generator) SetSpec(spec *rspec.Spec) {
+ g.spec = spec
+}
+
+// Spec gets the spec in the Generator g.
+func (g *Generator) Spec() *rspec.Spec {
+ return g.spec
+}
+
+// Save writes the spec into w.
+func (g *Generator) Save(w io.Writer, exportOpts ExportOptions) (err error) {
+ var data []byte
+
+ if g.spec.Linux != nil {
+ buf, err := json.Marshal(g.spec.Linux)
+ if err != nil {
+ return err
+ }
+ if string(buf) == "{}" {
+ g.spec.Linux = nil
+ }
+ }
+
+ if exportOpts.Seccomp {
+ // Guard against a nil dereference: Linux may have been cleared above
+ // when it marshaled to an empty object.
+ if g.spec.Linux == nil || g.spec.Linux.Seccomp == nil {
+ return fmt.Errorf("seccomp export requested, but the spec contains no seccomp configuration")
+ }
+ data, err = json.MarshalIndent(g.spec.Linux.Seccomp, "", "\t")
+ } else {
+ data, err = json.MarshalIndent(g.spec, "", "\t")
+ }
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(data)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// SaveToFile writes the spec into a file.
+func (g *Generator) SaveToFile(path string, exportOpts ExportOptions) error {
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return g.Save(f, exportOpts)
+}
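+
+// Illustrative sketch (not part of the upstream file): generating a default
+// spec and writing it to a hypothetical config.json path:
+//
+//    g := New()
+//    g.SetProcessArgs([]string{"/bin/sh"})
+//    if err := g.SaveToFile("config.json", ExportOptions{}); err != nil {
+//        // handle error
+//    }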
+
+// SetVersion sets g.spec.Version.
+func (g *Generator) SetVersion(version string) {
+ g.initSpec()
+ g.spec.Version = version
+}
+
+// SetRootPath sets g.spec.Root.Path.
+func (g *Generator) SetRootPath(path string) {
+ g.initSpecRoot()
+ g.spec.Root.Path = path
+}
+
+// SetRootReadonly sets g.spec.Root.Readonly.
+func (g *Generator) SetRootReadonly(b bool) {
+ g.initSpecRoot()
+ g.spec.Root.Readonly = b
+}
+
+// SetHostname sets g.spec.Hostname.
+func (g *Generator) SetHostname(s string) {
+ g.initSpec()
+ g.spec.Hostname = s
+}
+
+// ClearAnnotations clears g.spec.Annotations.
+func (g *Generator) ClearAnnotations() {
+ if g.spec == nil {
+ return
+ }
+ g.spec.Annotations = make(map[string]string)
+}
+
+// AddAnnotation adds an annotation into g.spec.Annotations.
+func (g *Generator) AddAnnotation(key, value string) {
+ g.initSpecAnnotations()
+ g.spec.Annotations[key] = value
+}
+
+// RemoveAnnotation removes an annotation from g.spec.Annotations.
+func (g *Generator) RemoveAnnotation(key string) {
+ if g.spec == nil || g.spec.Annotations == nil {
+ return
+ }
+ delete(g.spec.Annotations, key)
+}
+
+// SetProcessConsoleSize sets g.spec.Process.ConsoleSize.
+func (g *Generator) SetProcessConsoleSize(width, height uint) {
+ g.initSpecProcessConsoleSize()
+ g.spec.Process.ConsoleSize.Width = width
+ g.spec.Process.ConsoleSize.Height = height
+}
+
+// SetProcessUID sets g.spec.Process.User.UID.
+func (g *Generator) SetProcessUID(uid uint32) {
+ g.initSpecProcess()
+ g.spec.Process.User.UID = uid
+}
+
+// SetProcessGID sets g.spec.Process.User.GID.
+func (g *Generator) SetProcessGID(gid uint32) {
+ g.initSpecProcess()
+ g.spec.Process.User.GID = gid
+}
+
+// SetProcessCwd sets g.spec.Process.Cwd.
+func (g *Generator) SetProcessCwd(cwd string) {
+ g.initSpecProcess()
+ g.spec.Process.Cwd = cwd
+}
+
+// SetProcessNoNewPrivileges sets g.spec.Process.NoNewPrivileges.
+func (g *Generator) SetProcessNoNewPrivileges(b bool) {
+ g.initSpecProcess()
+ g.spec.Process.NoNewPrivileges = b
+}
+
+// SetProcessTerminal sets g.spec.Process.Terminal.
+func (g *Generator) SetProcessTerminal(b bool) {
+ g.initSpecProcess()
+ g.spec.Process.Terminal = b
+}
+
+// SetProcessApparmorProfile sets g.spec.Process.ApparmorProfile.
+func (g *Generator) SetProcessApparmorProfile(prof string) {
+ g.initSpecProcess()
+ g.spec.Process.ApparmorProfile = prof
+}
+
+// SetProcessArgs sets g.spec.Process.Args.
+func (g *Generator) SetProcessArgs(args []string) {
+ g.initSpecProcess()
+ g.spec.Process.Args = args
+}
+
+// ClearProcessEnv clears g.spec.Process.Env.
+func (g *Generator) ClearProcessEnv() {
+ if g.spec == nil {
+ return
+ }
+ g.spec.Process.Env = []string{}
+}
+
+// AddProcessEnv adds name=value into g.spec.Process.Env, or replaces an
+// existing entry with the given name.
+func (g *Generator) AddProcessEnv(name, value string) {
+ g.initSpecProcess()
+
+ env := fmt.Sprintf("%s=%s", name, value)
+ for idx := range g.spec.Process.Env {
+ if strings.HasPrefix(g.spec.Process.Env[idx], name+"=") {
+ g.spec.Process.Env[idx] = env
+ return
+ }
+ }
+ g.spec.Process.Env = append(g.spec.Process.Env, env)
+}
+
+// AddProcessRlimits adds rlimit into g.spec.Process.Rlimits.
+func (g *Generator) AddProcessRlimits(rType string, rHard uint64, rSoft uint64) {
+ g.initSpecProcess()
+ for i, rlimit := range g.spec.Process.Rlimits {
+ if rlimit.Type == rType {
+ g.spec.Process.Rlimits[i].Hard = rHard
+ g.spec.Process.Rlimits[i].Soft = rSoft
+ return
+ }
+ }
+
+ newRlimit := rspec.POSIXRlimit{
+ Type: rType,
+ Hard: rHard,
+ Soft: rSoft,
+ }
+ g.spec.Process.Rlimits = append(g.spec.Process.Rlimits, newRlimit)
+}
+
+// RemoveProcessRlimits removes an rlimit from g.spec.Process.Rlimits.
+func (g *Generator) RemoveProcessRlimits(rType string) error {
+ if g.spec == nil {
+ return nil
+ }
+ for i, rlimit := range g.spec.Process.Rlimits {
+ if rlimit.Type == rType {
+ g.spec.Process.Rlimits = append(g.spec.Process.Rlimits[:i], g.spec.Process.Rlimits[i+1:]...)
+ return nil
+ }
+ }
+ return nil
+}
+
+// ClearProcessRlimits clears g.spec.Process.Rlimits.
+func (g *Generator) ClearProcessRlimits() {
+ if g.spec == nil {
+ return
+ }
+ g.spec.Process.Rlimits = []rspec.POSIXRlimit{}
+}
+
+// ClearProcessAdditionalGids clears g.spec.Process.User.AdditionalGids.
+func (g *Generator) ClearProcessAdditionalGids() {
+ if g.spec == nil {
+ return
+ }
+ g.spec.Process.User.AdditionalGids = []uint32{}
+}
+
+// AddProcessAdditionalGid adds an additional gid into g.spec.Process.AdditionalGids.
+func (g *Generator) AddProcessAdditionalGid(gid uint32) {
+ g.initSpecProcess()
+ for _, group := range g.spec.Process.User.AdditionalGids {
+ if group == gid {
+ return
+ }
+ }
+ g.spec.Process.User.AdditionalGids = append(g.spec.Process.User.AdditionalGids, gid)
+}
+
+// SetProcessSelinuxLabel sets g.spec.Process.SelinuxLabel.
+func (g *Generator) SetProcessSelinuxLabel(label string) {
+ g.initSpecProcess()
+ g.spec.Process.SelinuxLabel = label
+}
+
+// SetLinuxCgroupsPath sets g.spec.Linux.CgroupsPath.
+func (g *Generator) SetLinuxCgroupsPath(path string) {
+ g.initSpecLinux()
+ g.spec.Linux.CgroupsPath = path
+}
+
+// SetLinuxMountLabel sets g.spec.Linux.MountLabel.
+func (g *Generator) SetLinuxMountLabel(label string) {
+ g.initSpecLinux()
+ g.spec.Linux.MountLabel = label
+}
+
+// SetProcessOOMScoreAdj sets g.spec.Process.OOMScoreAdj.
+func (g *Generator) SetProcessOOMScoreAdj(adj int) {
+ g.initSpecProcess()
+ g.spec.Process.OOMScoreAdj = &adj
+}
+
+// SetLinuxResourcesCPUShares sets g.spec.Linux.Resources.CPU.Shares.
+func (g *Generator) SetLinuxResourcesCPUShares(shares uint64) {
+ g.initSpecLinuxResourcesCPU()
+ g.spec.Linux.Resources.CPU.Shares = &shares
+}
+
+// SetLinuxResourcesCPUQuota sets g.spec.Linux.Resources.CPU.Quota.
+func (g *Generator) SetLinuxResourcesCPUQuota(quota int64) {
+ g.initSpecLinuxResourcesCPU()
+ g.spec.Linux.Resources.CPU.Quota = &quota
+}
+
+// SetLinuxResourcesCPUPeriod sets g.spec.Linux.Resources.CPU.Period.
+func (g *Generator) SetLinuxResourcesCPUPeriod(period uint64) {
+ g.initSpecLinuxResourcesCPU()
+ g.spec.Linux.Resources.CPU.Period = &period
+}
+
+// SetLinuxResourcesCPURealtimeRuntime sets g.spec.Linux.Resources.CPU.RealtimeRuntime.
+func (g *Generator) SetLinuxResourcesCPURealtimeRuntime(time int64) {
+ g.initSpecLinuxResourcesCPU()
+ g.spec.Linux.Resources.CPU.RealtimeRuntime = &time
+}
+
+// SetLinuxResourcesCPURealtimePeriod sets g.spec.Linux.Resources.CPU.RealtimePeriod.
+func (g *Generator) SetLinuxResourcesCPURealtimePeriod(period uint64) {
+ g.initSpecLinuxResourcesCPU()
+ g.spec.Linux.Resources.CPU.RealtimePeriod = &period
+}
+
+// SetLinuxResourcesCPUCpus sets g.spec.Linux.Resources.CPU.Cpus.
+func (g *Generator) SetLinuxResourcesCPUCpus(cpus string) {
+ g.initSpecLinuxResourcesCPU()
+ g.spec.Linux.Resources.CPU.Cpus = cpus
+}
+
+// SetLinuxResourcesCPUMems sets g.spec.Linux.Resources.CPU.Mems.
+func (g *Generator) SetLinuxResourcesCPUMems(mems string) {
+ g.initSpecLinuxResourcesCPU()
+ g.spec.Linux.Resources.CPU.Mems = mems
+}
+
+// AddLinuxResourcesHugepageLimit adds or sets g.spec.Linux.Resources.HugepageLimits.
+func (g *Generator) AddLinuxResourcesHugepageLimit(pageSize string, limit uint64) {
+ hugepageLimit := rspec.LinuxHugepageLimit{
+ Pagesize: pageSize,
+ Limit: limit,
+ }
+
+ g.initSpecLinuxResources()
+ for i, pageLimit := range g.spec.Linux.Resources.HugepageLimits {
+ if pageLimit.Pagesize == pageSize {
+ g.spec.Linux.Resources.HugepageLimits[i].Limit = limit
+ return
+ }
+ }
+ g.spec.Linux.Resources.HugepageLimits = append(g.spec.Linux.Resources.HugepageLimits, hugepageLimit)
+}
+
+// DropLinuxResourcesHugepageLimit drops a hugepage limit from g.spec.Linux.Resources.HugepageLimits.
+func (g *Generator) DropLinuxResourcesHugepageLimit(pageSize string) error {
+ g.initSpecLinuxResources()
+ for i, pageLimit := range g.spec.Linux.Resources.HugepageLimits {
+ if pageLimit.Pagesize == pageSize {
+ g.spec.Linux.Resources.HugepageLimits = append(g.spec.Linux.Resources.HugepageLimits[:i], g.spec.Linux.Resources.HugepageLimits[i+1:]...)
+ return nil
+ }
+ }
+
+ return nil
+}
+
+// SetLinuxResourcesMemoryLimit sets g.spec.Linux.Resources.Memory.Limit.
+func (g *Generator) SetLinuxResourcesMemoryLimit(limit int64) {
+ g.initSpecLinuxResourcesMemory()
+ g.spec.Linux.Resources.Memory.Limit = &limit
+}
+
+// SetLinuxResourcesMemoryReservation sets g.spec.Linux.Resources.Memory.Reservation.
+func (g *Generator) SetLinuxResourcesMemoryReservation(reservation int64) {
+ g.initSpecLinuxResourcesMemory()
+ g.spec.Linux.Resources.Memory.Reservation = &reservation
+}
+
+// SetLinuxResourcesMemorySwap sets g.spec.Linux.Resources.Memory.Swap.
+func (g *Generator) SetLinuxResourcesMemorySwap(swap int64) {
+ g.initSpecLinuxResourcesMemory()
+ g.spec.Linux.Resources.Memory.Swap = &swap
+}
+
+// SetLinuxResourcesMemoryKernel sets g.spec.Linux.Resources.Memory.Kernel.
+func (g *Generator) SetLinuxResourcesMemoryKernel(kernel int64) {
+ g.initSpecLinuxResourcesMemory()
+ g.spec.Linux.Resources.Memory.Kernel = &kernel
+}
+
+// SetLinuxResourcesMemoryKernelTCP sets g.spec.Linux.Resources.Memory.KernelTCP.
+func (g *Generator) SetLinuxResourcesMemoryKernelTCP(kernelTCP int64) {
+ g.initSpecLinuxResourcesMemory()
+ g.spec.Linux.Resources.Memory.KernelTCP = &kernelTCP
+}
+
+// SetLinuxResourcesMemorySwappiness sets g.spec.Linux.Resources.Memory.Swappiness.
+func (g *Generator) SetLinuxResourcesMemorySwappiness(swappiness uint64) {
+ g.initSpecLinuxResourcesMemory()
+ g.spec.Linux.Resources.Memory.Swappiness = &swappiness
+}
+
+// SetLinuxResourcesMemoryDisableOOMKiller sets g.spec.Linux.Resources.Memory.DisableOOMKiller.
+func (g *Generator) SetLinuxResourcesMemoryDisableOOMKiller(disable bool) {
+ g.initSpecLinuxResourcesMemory()
+ g.spec.Linux.Resources.Memory.DisableOOMKiller = &disable
+}
+
+// SetLinuxResourcesNetworkClassID sets g.spec.Linux.Resources.Network.ClassID.
+func (g *Generator) SetLinuxResourcesNetworkClassID(classid uint32) {
+ g.initSpecLinuxResourcesNetwork()
+ g.spec.Linux.Resources.Network.ClassID = &classid
+}
+
+// AddLinuxResourcesNetworkPriorities adds or sets g.spec.Linux.Resources.Network.Priorities.
+func (g *Generator) AddLinuxResourcesNetworkPriorities(name string, prio uint32) {
+ g.initSpecLinuxResourcesNetwork()
+ for i, netPriority := range g.spec.Linux.Resources.Network.Priorities {
+ if netPriority.Name == name {
+ g.spec.Linux.Resources.Network.Priorities[i].Priority = prio
+ return
+ }
+ }
+ interfacePrio := new(rspec.LinuxInterfacePriority)
+ interfacePrio.Name = name
+ interfacePrio.Priority = prio
+ g.spec.Linux.Resources.Network.Priorities = append(g.spec.Linux.Resources.Network.Priorities, *interfacePrio)
+}
+
+// DropLinuxResourcesNetworkPriorities drops one item from g.spec.Linux.Resources.Network.Priorities.
+func (g *Generator) DropLinuxResourcesNetworkPriorities(name string) {
+ g.initSpecLinuxResourcesNetwork()
+ for i, netPriority := range g.spec.Linux.Resources.Network.Priorities {
+ if netPriority.Name == name {
+ g.spec.Linux.Resources.Network.Priorities = append(g.spec.Linux.Resources.Network.Priorities[:i], g.spec.Linux.Resources.Network.Priorities[i+1:]...)
+ return
+ }
+ }
+}
+
+// SetLinuxResourcesPidsLimit sets g.spec.Linux.Resources.Pids.Limit.
+func (g *Generator) SetLinuxResourcesPidsLimit(limit int64) {
+ g.initSpecLinuxResourcesPids()
+ g.spec.Linux.Resources.Pids.Limit = limit
+}
+
+// ClearLinuxSysctl clears g.spec.Linux.Sysctl.
+func (g *Generator) ClearLinuxSysctl() {
+ if g.spec == nil || g.spec.Linux == nil {
+ return
+ }
+ g.spec.Linux.Sysctl = make(map[string]string)
+}
+
+// AddLinuxSysctl adds a new sysctl config into g.spec.Linux.Sysctl.
+func (g *Generator) AddLinuxSysctl(key, value string) {
+ g.initSpecLinuxSysctl()
+ g.spec.Linux.Sysctl[key] = value
+}
+
+// RemoveLinuxSysctl removes a sysctl config from g.spec.Linux.Sysctl.
+func (g *Generator) RemoveLinuxSysctl(key string) {
+ if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Sysctl == nil {
+ return
+ }
+ delete(g.spec.Linux.Sysctl, key)
+}
+
+// ClearLinuxUIDMappings clears g.spec.Linux.UIDMappings.
+func (g *Generator) ClearLinuxUIDMappings() {
+ if g.spec == nil || g.spec.Linux == nil {
+ return
+ }
+ g.spec.Linux.UIDMappings = []rspec.LinuxIDMapping{}
+}
+
+// AddLinuxUIDMapping adds uidMap into g.spec.Linux.UIDMappings.
+func (g *Generator) AddLinuxUIDMapping(hid, cid, size uint32) {
+ idMapping := rspec.LinuxIDMapping{
+ HostID: hid,
+ ContainerID: cid,
+ Size: size,
+ }
+
+ g.initSpecLinux()
+ g.spec.Linux.UIDMappings = append(g.spec.Linux.UIDMappings, idMapping)
+}
+
+// ClearLinuxGIDMappings clears g.spec.Linux.GIDMappings.
+func (g *Generator) ClearLinuxGIDMappings() {
+ if g.spec == nil || g.spec.Linux == nil {
+ return
+ }
+ g.spec.Linux.GIDMappings = []rspec.LinuxIDMapping{}
+}
+
+// AddLinuxGIDMapping adds gidMap into g.spec.Linux.GIDMappings.
+func (g *Generator) AddLinuxGIDMapping(hid, cid, size uint32) {
+ idMapping := rspec.LinuxIDMapping{
+ HostID: hid,
+ ContainerID: cid,
+ Size: size,
+ }
+
+ g.initSpecLinux()
+ g.spec.Linux.GIDMappings = append(g.spec.Linux.GIDMappings, idMapping)
+}
+
+// SetLinuxRootPropagation sets g.spec.Linux.RootfsPropagation.
+func (g *Generator) SetLinuxRootPropagation(rp string) error {
+	switch rp {
+	case "", "private", "rprivate", "slave", "rslave", "shared", "rshared":
+	default:
+		return fmt.Errorf("rootfs-propagation must be empty or one of private|rprivate|slave|rslave|shared|rshared")
+	}
+	g.initSpecLinux()
+	g.spec.Linux.RootfsPropagation = rp
+	return nil
+}
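+
+// Illustrative usage of the validation above; a minimal sketch:
+//
+// err := g.SetLinuxRootPropagation("rslave") // nil: accepted value
+// err = g.SetLinuxRootPropagation("bogus")   // non-nil: rejected by the switch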
+
+// ClearPreStartHooks clears g.spec.Hooks.Prestart.
+func (g *Generator) ClearPreStartHooks() {
+ if g.spec == nil || g.spec.Hooks == nil {
+ return
+ }
+ g.spec.Hooks.Prestart = []rspec.Hook{}
+}
+
+// AddPreStartHook adds a prestart hook into g.spec.Hooks.Prestart.
+func (g *Generator) AddPreStartHook(path string, args []string) {
+ g.initSpecHooks()
+ hook := rspec.Hook{Path: path, Args: args}
+ for i, existing := range g.spec.Hooks.Prestart {
+ if existing.Path == path {
+ // Renaming the loop variable avoids shadowing the new hook,
+ // which previously made this assignment a no-op.
+ g.spec.Hooks.Prestart[i] = hook
+ return
+ }
+ }
+ g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook)
+}
+
+// AddPreStartHookEnv adds envs of a prestart hook into g.spec.Hooks.Prestart.
+func (g *Generator) AddPreStartHookEnv(path string, envs []string) {
+ g.initSpecHooks()
+ for i, hook := range g.spec.Hooks.Prestart {
+ if hook.Path == path {
+ g.spec.Hooks.Prestart[i].Env = envs
+ return
+ }
+ }
+ hook := rspec.Hook{Path: path, Env: envs}
+ g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook)
+}
+
+// AddPreStartHookTimeout adds timeout of a prestart hook into g.spec.Hooks.Prestart.
+func (g *Generator) AddPreStartHookTimeout(path string, timeout int) {
+ g.initSpecHooks()
+ for i, hook := range g.spec.Hooks.Prestart {
+ if hook.Path == path {
+ g.spec.Hooks.Prestart[i].Timeout = &timeout
+ return
+ }
+ }
+ hook := rspec.Hook{Path: path, Timeout: &timeout}
+ g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook)
+}
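+
+// Illustrative usage of the prestart-hook helpers above; a minimal sketch
+// (the hook path and values are hypothetical):
+//
+// g.AddPreStartHook("/usr/libexec/oci/hook", []string{"hook", "prestart"})
+// g.AddPreStartHookEnv("/usr/libexec/oci/hook", []string{"LOG_LEVEL=debug"})
+// g.AddPreStartHookTimeout("/usr/libexec/oci/hook", 30)
+//
+// Because each helper matches entries by Path, all three calls act on the
+// same prestart hook.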
+
+// ClearPostStopHooks clears g.spec.Hooks.Poststop.
+func (g *Generator) ClearPostStopHooks() {
+ if g.spec == nil || g.spec.Hooks == nil {
+ return
+ }
+ g.spec.Hooks.Poststop = []rspec.Hook{}
+}
+
+// AddPostStopHook adds a poststop hook into g.spec.Hooks.Poststop.
+func (g *Generator) AddPostStopHook(path string, args []string) {
+ g.initSpecHooks()
+ hook := rspec.Hook{Path: path, Args: args}
+ for i, existing := range g.spec.Hooks.Poststop {
+ if existing.Path == path {
+ // Use a distinct loop variable so the new hook is not shadowed.
+ g.spec.Hooks.Poststop[i] = hook
+ return
+ }
+ }
+ g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook)
+}
+
+// AddPostStopHookEnv adds envs of a poststop hook into g.spec.Hooks.Poststop.
+func (g *Generator) AddPostStopHookEnv(path string, envs []string) {
+ g.initSpecHooks()
+ for i, hook := range g.spec.Hooks.Poststop {
+ if hook.Path == path {
+ g.spec.Hooks.Poststop[i].Env = envs
+ return
+ }
+ }
+ hook := rspec.Hook{Path: path, Env: envs}
+ g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook)
+}
+
+// AddPostStopHookTimeout adds timeout of a poststop hook into g.spec.Hooks.Poststop.
+func (g *Generator) AddPostStopHookTimeout(path string, timeout int) {
+ g.initSpecHooks()
+ for i, hook := range g.spec.Hooks.Poststop {
+ if hook.Path == path {
+ g.spec.Hooks.Poststop[i].Timeout = &timeout
+ return
+ }
+ }
+ hook := rspec.Hook{Path: path, Timeout: &timeout}
+ g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook)
+}
+
+// ClearPostStartHooks clears g.spec.Hooks.Poststart.
+func (g *Generator) ClearPostStartHooks() {
+ if g.spec == nil || g.spec.Hooks == nil {
+ return
+ }
+ g.spec.Hooks.Poststart = []rspec.Hook{}
+}
+
+// AddPostStartHook adds a poststart hook into g.spec.Hooks.Poststart.
+func (g *Generator) AddPostStartHook(path string, args []string) {
+ g.initSpecHooks()
+ hook := rspec.Hook{Path: path, Args: args}
+ for i, existing := range g.spec.Hooks.Poststart {
+ if existing.Path == path {
+ // Use a distinct loop variable so the new hook is not shadowed.
+ g.spec.Hooks.Poststart[i] = hook
+ return
+ }
+ }
+ g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook)
+}
+
+// AddPostStartHookEnv adds envs of a poststart hook into g.spec.Hooks.Poststart.
+func (g *Generator) AddPostStartHookEnv(path string, envs []string) {
+ g.initSpecHooks()
+ for i, hook := range g.spec.Hooks.Poststart {
+ if hook.Path == path {
+ g.spec.Hooks.Poststart[i].Env = envs
+ return
+ }
+ }
+ hook := rspec.Hook{Path: path, Env: envs}
+ g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook)
+}
+
+// AddPostStartHookTimeout adds timeout of a poststart hook into g.spec.Hooks.Poststart.
+func (g *Generator) AddPostStartHookTimeout(path string, timeout int) {
+ g.initSpecHooks()
+ for i, hook := range g.spec.Hooks.Poststart {
+ if hook.Path == path {
+ g.spec.Hooks.Poststart[i].Timeout = &timeout
+ return
+ }
+ }
+ hook := rspec.Hook{Path: path, Timeout: &timeout}
+ g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook)
+}
+
+// AddTmpfsMount adds a tmpfs mount into g.spec.Mounts.
+func (g *Generator) AddTmpfsMount(dest string, options []string) {
+ mnt := rspec.Mount{
+ Destination: dest,
+ Type: "tmpfs",
+ Source: "tmpfs",
+ Options: options,
+ }
+
+ g.initSpec()
+ g.spec.Mounts = append(g.spec.Mounts, mnt)
+}
+
+// AddCgroupsMount adds a cgroup mount into g.spec.Mounts.
+func (g *Generator) AddCgroupsMount(mountCgroupOption string) error {
+ switch mountCgroupOption {
+ case "ro":
+ case "rw":
+ case "no":
+ return nil
+ default:
+ return fmt.Errorf("--mount-cgroups should be one of (ro,rw,no)")
+ }
+
+ mnt := rspec.Mount{
+ Destination: "/sys/fs/cgroup",
+ Type: "cgroup",
+ Source: "cgroup",
+ Options: []string{"nosuid", "noexec", "nodev", "relatime", mountCgroupOption},
+ }
+ g.initSpec()
+ g.spec.Mounts = append(g.spec.Mounts, mnt)
+
+ return nil
+}
+
+// AddBindMount adds a bind mount into g.spec.Mounts.
+func (g *Generator) AddBindMount(source, dest string, options []string) {
+ if len(options) == 0 {
+ options = []string{"rw"}
+ }
+
+ // We have to make sure that there is a bind option set, otherwise it won't
+ // be an actual bindmount.
+ foundBindOption := false
+ for _, opt := range options {
+ if opt == "bind" || opt == "rbind" {
+ foundBindOption = true
+ break
+ }
+ }
+ if !foundBindOption {
+ options = append(options, "bind")
+ }
+
+ mnt := rspec.Mount{
+ Destination: dest,
+ Type: "bind",
+ Source: source,
+ Options: options,
+ }
+ g.initSpec()
+ g.spec.Mounts = append(g.spec.Mounts, mnt)
+}
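+
+// Illustrative usage; a minimal sketch (the host path is hypothetical):
+//
+// g.AddBindMount("/srv/data", "/data", []string{"ro"})
+// // The stored options become ["ro", "bind"]: "bind" is appended
+// // automatically because neither "bind" nor "rbind" was supplied.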
+
+// SetupPrivileged sets up the privilege-related fields inside g.spec.
+func (g *Generator) SetupPrivileged(privileged bool) {
+ if privileged { // Add all capabilities in privileged mode.
+ var finalCapList []string
+ for _, cap := range capability.List() {
+ if g.HostSpecific && cap > validate.LastCap() {
+ continue
+ }
+ finalCapList = append(finalCapList, fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())))
+ }
+ g.initSpecLinux()
+ g.initSpecProcessCapabilities()
+ g.ClearProcessCapabilities()
+ g.spec.Process.Capabilities.Bounding = append(g.spec.Process.Capabilities.Bounding, finalCapList...)
+ g.spec.Process.Capabilities.Effective = append(g.spec.Process.Capabilities.Effective, finalCapList...)
+ g.spec.Process.Capabilities.Inheritable = append(g.spec.Process.Capabilities.Inheritable, finalCapList...)
+ g.spec.Process.Capabilities.Permitted = append(g.spec.Process.Capabilities.Permitted, finalCapList...)
+ g.spec.Process.Capabilities.Ambient = append(g.spec.Process.Capabilities.Ambient, finalCapList...)
+ g.spec.Process.SelinuxLabel = ""
+ g.spec.Process.ApparmorProfile = ""
+ g.spec.Linux.Seccomp = nil
+ }
+}
+
+// ClearProcessCapabilities clears g.spec.Process.Capabilities.
+func (g *Generator) ClearProcessCapabilities() {
+ if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil {
+ return
+ }
+ g.spec.Process.Capabilities.Bounding = []string{}
+ g.spec.Process.Capabilities.Effective = []string{}
+ g.spec.Process.Capabilities.Inheritable = []string{}
+ g.spec.Process.Capabilities.Permitted = []string{}
+ g.spec.Process.Capabilities.Ambient = []string{}
+}
+
+// AddProcessCapability adds a process capability into g.spec.Process.Capabilities.
+func (g *Generator) AddProcessCapability(c string) error {
+ cp := strings.ToUpper(c)
+ if err := validate.CapValid(cp, g.HostSpecific); err != nil {
+ return err
+ }
+
+ g.initSpecProcessCapabilities()
+
+ // addCap appends cp to a capability set unless an equivalent entry
+ // (compared case-insensitively) is already present.
+ addCap := func(set []string) []string {
+ for _, cap := range set {
+ if strings.ToUpper(cap) == cp {
+ return set
+ }
+ }
+ return append(set, cp)
+ }
+
+ caps := g.spec.Process.Capabilities
+ caps.Bounding = addCap(caps.Bounding)
+ caps.Effective = addCap(caps.Effective)
+ caps.Inheritable = addCap(caps.Inheritable)
+ caps.Permitted = addCap(caps.Permitted)
+ caps.Ambient = addCap(caps.Ambient)
+
+ return nil
+}
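+
+// Illustrative usage; a minimal sketch:
+//
+// // Adds CAP_NET_ADMIN to all five capability sets; matching is
+// // case-insensitive and a repeated call is a no-op.
+// err := g.AddProcessCapability("CAP_NET_ADMIN")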
+
+// DropProcessCapability drops a process capability from g.spec.Process.Capabilities.
+func (g *Generator) DropProcessCapability(c string) error {
+ cp := strings.ToUpper(c)
+ if err := validate.CapValid(cp, g.HostSpecific); err != nil {
+ return err
+ }
+
+ g.initSpecProcessCapabilities()
+
+ // dropCap rebuilds a capability set without cp; filtering into a fresh
+ // view preserves order and avoids mutating the slice while it is being
+ // iterated over.
+ dropCap := func(set []string) []string {
+ kept := set[:0]
+ for _, cap := range set {
+ if strings.ToUpper(cap) != cp {
+ kept = append(kept, cap)
+ }
+ }
+ return kept
+ }
+
+ caps := g.spec.Process.Capabilities
+ caps.Bounding = dropCap(caps.Bounding)
+ caps.Effective = dropCap(caps.Effective)
+ caps.Inheritable = dropCap(caps.Inheritable)
+ caps.Permitted = dropCap(caps.Permitted)
+ caps.Ambient = dropCap(caps.Ambient)
+
+ return nil
+}
+
+func mapStrToNamespace(ns string, path string) (rspec.LinuxNamespace, error) {
+ switch ns {
+ case "network":
+ return rspec.LinuxNamespace{Type: rspec.NetworkNamespace, Path: path}, nil
+ case "pid":
+ return rspec.LinuxNamespace{Type: rspec.PIDNamespace, Path: path}, nil
+ case "mount":
+ return rspec.LinuxNamespace{Type: rspec.MountNamespace, Path: path}, nil
+ case "ipc":
+ return rspec.LinuxNamespace{Type: rspec.IPCNamespace, Path: path}, nil
+ case "uts":
+ return rspec.LinuxNamespace{Type: rspec.UTSNamespace, Path: path}, nil
+ case "user":
+ return rspec.LinuxNamespace{Type: rspec.UserNamespace, Path: path}, nil
+ case "cgroup":
+ return rspec.LinuxNamespace{Type: rspec.CgroupNamespace, Path: path}, nil
+ default:
+ return rspec.LinuxNamespace{}, fmt.Errorf("unrecognized namespace %q", ns)
+ }
+}
+
+// ClearLinuxNamespaces clears g.spec.Linux.Namespaces.
+func (g *Generator) ClearLinuxNamespaces() {
+ if g.spec == nil || g.spec.Linux == nil {
+ return
+ }
+ g.spec.Linux.Namespaces = []rspec.LinuxNamespace{}
+}
+
+// AddOrReplaceLinuxNamespace adds or replaces a namespace inside
+// g.spec.Linux.Namespaces.
+func (g *Generator) AddOrReplaceLinuxNamespace(ns string, path string) error {
+ namespace, err := mapStrToNamespace(ns, path)
+ if err != nil {
+ return err
+ }
+
+ g.initSpecLinux()
+ for i, ns := range g.spec.Linux.Namespaces {
+ if ns.Type == namespace.Type {
+ g.spec.Linux.Namespaces[i] = namespace
+ return nil
+ }
+ }
+ g.spec.Linux.Namespaces = append(g.spec.Linux.Namespaces, namespace)
+ return nil
+}
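+
+// Illustrative usage; a minimal sketch (the netns path is hypothetical):
+//
+// // Join an existing network namespace; calling again with another path
+// // replaces the entry instead of duplicating it.
+// err := g.AddOrReplaceLinuxNamespace("network", "/var/run/netns/app")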
+
+// RemoveLinuxNamespace removes a namespace from g.spec.Linux.Namespaces.
+func (g *Generator) RemoveLinuxNamespace(ns string) error {
+ namespace, err := mapStrToNamespace(ns, "")
+ if err != nil {
+ return err
+ }
+
+ if g.spec == nil || g.spec.Linux == nil {
+ return nil
+ }
+ for i, ns := range g.spec.Linux.Namespaces {
+ if ns.Type == namespace.Type {
+ g.spec.Linux.Namespaces = append(g.spec.Linux.Namespaces[:i], g.spec.Linux.Namespaces[i+1:]...)
+ return nil
+ }
+ }
+ return nil
+}
+
+// AddDevice adds a device into g.spec.Linux.Devices.
+func (g *Generator) AddDevice(device rspec.LinuxDevice) {
+ g.initSpecLinux()
+
+ for i, dev := range g.spec.Linux.Devices {
+ if dev.Path == device.Path {
+ g.spec.Linux.Devices[i] = device
+ return
+ }
+ if dev.Type == device.Type && dev.Major == device.Major && dev.Minor == device.Minor {
+ fmt.Fprintln(os.Stderr, "WARNING: The same type, major and minor should not be used for multiple devices.")
+ }
+ }
+
+ g.spec.Linux.Devices = append(g.spec.Linux.Devices, device)
+}
+
+// RemoveDevice removes a device from g.spec.Linux.Devices.
+func (g *Generator) RemoveDevice(path string) error {
+ if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Devices == nil {
+ return nil
+ }
+
+ for i, device := range g.spec.Linux.Devices {
+ if device.Path == path {
+ g.spec.Linux.Devices = append(g.spec.Linux.Devices[:i], g.spec.Linux.Devices[i+1:]...)
+ return nil
+ }
+ }
+ return nil
+}
+
+// ClearLinuxDevices clears g.spec.Linux.Devices
+func (g *Generator) ClearLinuxDevices() {
+ if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Devices == nil {
+ return
+ }
+
+ g.spec.Linux.Devices = []rspec.LinuxDevice{}
+}
+
+// strPtr returns the pointer pointing to the string s.
+func strPtr(s string) *string { return &s }
+
+// SetSyscallAction adds rules for syscalls with the specified action
+func (g *Generator) SetSyscallAction(arguments seccomp.SyscallOpts) error {
+ g.initSpecLinuxSeccomp()
+ return seccomp.ParseSyscallFlag(arguments, g.spec.Linux.Seccomp)
+}
+
+// SetDefaultSeccompAction sets the default action for all syscalls not defined
+// and then removes any syscall rules with this action already specified.
+func (g *Generator) SetDefaultSeccompAction(action string) error {
+ g.initSpecLinuxSeccomp()
+ return seccomp.ParseDefaultAction(action, g.spec.Linux.Seccomp)
+}
+
+// SetDefaultSeccompActionForce only sets the default action for all syscalls not defined
+func (g *Generator) SetDefaultSeccompActionForce(action string) error {
+ g.initSpecLinuxSeccomp()
+ return seccomp.ParseDefaultActionForce(action, g.spec.Linux.Seccomp)
+}
+
+// SetSeccompArchitecture sets the supported seccomp architectures
+func (g *Generator) SetSeccompArchitecture(architecture string) error {
+ g.initSpecLinuxSeccomp()
+ return seccomp.ParseArchitectureFlag(architecture, g.spec.Linux.Seccomp)
+}
+
+// RemoveSeccompRule removes rules for any specified syscalls
+func (g *Generator) RemoveSeccompRule(arguments string) error {
+ g.initSpecLinuxSeccomp()
+ return seccomp.RemoveAction(arguments, g.spec.Linux.Seccomp)
+}
+
+// RemoveAllSeccompRules removes all syscall rules
+func (g *Generator) RemoveAllSeccompRules() error {
+ g.initSpecLinuxSeccomp()
+ return seccomp.RemoveAllSeccompRules(g.spec.Linux.Seccomp)
+}
+
+// AddLinuxMaskedPaths adds a masked path into g.spec.Linux.MaskedPaths.
+func (g *Generator) AddLinuxMaskedPaths(path string) {
+ g.initSpecLinux()
+ g.spec.Linux.MaskedPaths = append(g.spec.Linux.MaskedPaths, path)
+}
+
+// AddLinuxReadonlyPaths adds a read-only path into g.spec.Linux.ReadonlyPaths.
+func (g *Generator) AddLinuxReadonlyPaths(path string) {
+ g.initSpecLinux()
+ g.spec.Linux.ReadonlyPaths = append(g.spec.Linux.ReadonlyPaths, path)
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go
new file mode 100644
index 000000000..eade5718e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go
@@ -0,0 +1,12 @@
+package seccomp
+
+const (
+ seccompOverwrite = "overwrite"
+ seccompAppend = "append"
+ nothing = "nothing"
+ kill = "kill"
+ trap = "trap"
+ trace = "trace"
+ allow = "allow"
+ errno = "errno"
+)
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go
new file mode 100644
index 000000000..25daf0752
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go
@@ -0,0 +1,135 @@
+package seccomp
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// SyscallOpts contains options for parsing syscall rules
+type SyscallOpts struct {
+ Action string
+ Syscall string
+ Index string
+ Value string
+ ValueTwo string
+ Operator string
+}
+
+// ParseSyscallFlag takes a SyscallOpts struct and the seccomp configuration
+// and sets the new syscall rule accordingly
+func ParseSyscallFlag(args SyscallOpts, config *rspec.LinuxSeccomp) error {
+ var arguments []string
+ if args.Index != "" && args.Value != "" && args.ValueTwo != "" && args.Operator != "" {
+ arguments = []string{args.Action, args.Syscall, args.Index, args.Value,
+ args.ValueTwo, args.Operator}
+ } else {
+ arguments = []string{args.Action, args.Syscall}
+ }
+
+ action, err := parseAction(arguments[0])
+ if err != nil {
+ return err
+ }
+ if action == config.DefaultAction && args.argsAreEmpty() {
+ // default already set, no need to make changes
+ return nil
+ }
+
+ var newSyscall rspec.LinuxSyscall
+ numOfArgs := len(arguments)
+ if numOfArgs == 6 || numOfArgs == 2 {
+ argStruct, err := parseArguments(arguments[1:])
+ if err != nil {
+ return err
+ }
+ newSyscall = newSyscallStruct(arguments[1], action, argStruct)
+ } else {
+ return fmt.Errorf("incorrect number of arguments to ParseSyscall: %d", numOfArgs)
+ }
+
+ decision, err := decideCourseOfAction(&newSyscall, config.Syscalls)
+ if err != nil {
+ return err
+ }
+ delimDecision := strings.Split(decision, ":")
+
+ if delimDecision[0] == seccompAppend {
+ config.Syscalls = append(config.Syscalls, newSyscall)
+ }
+
+ if delimDecision[0] == seccompOverwrite {
+ indexForOverwrite, err := strconv.ParseInt(delimDecision[1], 10, 32)
+ if err != nil {
+ return err
+ }
+ config.Syscalls[indexForOverwrite] = newSyscall
+ }
+
+ return nil
+}
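+
+// Illustrative usage; a minimal sketch, assuming config is an initialized
+// *rspec.LinuxSeccomp:
+//
+// // Simple form: action and syscall only.
+// err := ParseSyscallFlag(SyscallOpts{Action: "errno", Syscall: "socket"}, config)
+//
+// // Full form: one argument filter (index, value, valueTwo, operator).
+// err = ParseSyscallFlag(SyscallOpts{
+//         Action: "allow", Syscall: "personality",
+//         Index: "0", Value: "8", ValueTwo: "0", Operator: "EQ",
+// }, config)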
+
+var actions = map[string]rspec.LinuxSeccompAction{
+ "allow": rspec.ActAllow,
+ "errno": rspec.ActErrno,
+ "kill": rspec.ActKill,
+ "trace": rspec.ActTrace,
+ "trap": rspec.ActTrap,
+}
+
+// Take passed action, return the SCMP_ACT_<ACTION> version of it
+func parseAction(action string) (rspec.LinuxSeccompAction, error) {
+ a, ok := actions[action]
+ if !ok {
+ return "", fmt.Errorf("unrecognized action: %s", action)
+ }
+ return a, nil
+}
+
+// ParseDefaultAction sets the default action of the seccomp configuration
+// and then removes any rules that were already specified with this action
+func ParseDefaultAction(action string, config *rspec.LinuxSeccomp) error {
+ if action == "" {
+ return nil
+ }
+
+ defaultAction, err := parseAction(action)
+ if err != nil {
+ return err
+ }
+ config.DefaultAction = defaultAction
+ err = RemoveAllMatchingRules(config, defaultAction)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// ParseDefaultActionForce simply sets the default action of the seccomp configuration
+func ParseDefaultActionForce(action string, config *rspec.LinuxSeccomp) error {
+ if action == "" {
+ return nil
+ }
+
+ defaultAction, err := parseAction(action)
+ if err != nil {
+ return err
+ }
+ config.DefaultAction = defaultAction
+ return nil
+}
+
+func newSyscallStruct(name string, action rspec.LinuxSeccompAction, args []rspec.LinuxSeccompArg) rspec.LinuxSyscall {
+ syscallStruct := rspec.LinuxSyscall{
+ Names: []string{name},
+ Action: action,
+ Args: args,
+ }
+ return syscallStruct
+}
+
+func (s SyscallOpts) argsAreEmpty() bool {
+ return (s.Index == "" &&
+ s.Value == "" &&
+ s.ValueTwo == "" &&
+ s.Operator == "")
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go
new file mode 100644
index 000000000..9b2bdfd2f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go
@@ -0,0 +1,55 @@
+package seccomp
+
+import (
+ "fmt"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// ParseArchitectureFlag takes the raw string passed with the --arch flag, parses it
+// and updates the Seccomp config accordingly
+func ParseArchitectureFlag(architectureArg string, config *rspec.LinuxSeccomp) error {
+ correctedArch, err := parseArch(architectureArg)
+ if err != nil {
+ return err
+ }
+
+ shouldAppend := true
+ for _, alreadySpecified := range config.Architectures {
+ if correctedArch == alreadySpecified {
+ shouldAppend = false
+ }
+ }
+ if shouldAppend {
+ config.Architectures = append(config.Architectures, correctedArch)
+ }
+ return nil
+}
+
+func parseArch(arch string) (rspec.Arch, error) {
+ arches := map[string]rspec.Arch{
+ "x86": rspec.ArchX86,
+ "amd64": rspec.ArchX86_64,
+ "x32": rspec.ArchX32,
+ "arm": rspec.ArchARM,
+ "arm64": rspec.ArchAARCH64,
+ "mips": rspec.ArchMIPS,
+ "mips64": rspec.ArchMIPS64,
+ "mips64n32": rspec.ArchMIPS64N32,
+ "mipsel": rspec.ArchMIPSEL,
+ "mipsel64": rspec.ArchMIPSEL64,
+ "mipsel64n32": rspec.ArchMIPSEL64N32,
+ "parisc": rspec.ArchPARISC,
+ "parisc64": rspec.ArchPARISC64,
+ "ppc": rspec.ArchPPC,
+ "ppc64": rspec.ArchPPC64,
+ "ppc64le": rspec.ArchPPC64LE,
+ "s390": rspec.ArchS390,
+ "s390x": rspec.ArchS390X,
+ }
+ a, ok := arches[arch]
+ if !ok {
+ return "", fmt.Errorf("unrecognized architecture: %s", arch)
+ }
+ return a, nil
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go
new file mode 100644
index 000000000..2b4c394e6
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go
@@ -0,0 +1,73 @@
+package seccomp
+
+import (
+ "fmt"
+ "strconv"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// parseArguments takes a list of arguments (delimArgs). It parses and fills out
+// the argument information and returns a slice of arg structs
+func parseArguments(delimArgs []string) ([]rspec.LinuxSeccompArg, error) {
+ nilArgSlice := []rspec.LinuxSeccompArg{}
+ numberOfArgs := len(delimArgs)
+
+ // No parameters passed with syscall
+ if numberOfArgs == 1 {
+ return nilArgSlice, nil
+ }
+
+ // Correct number of parameters passed with syscall
+ if numberOfArgs == 5 {
+ syscallIndex, err := strconv.ParseUint(delimArgs[1], 10, 0)
+ if err != nil {
+ return nilArgSlice, err
+ }
+
+ syscallValue, err := strconv.ParseUint(delimArgs[2], 10, 64)
+ if err != nil {
+ return nilArgSlice, err
+ }
+
+ syscallValueTwo, err := strconv.ParseUint(delimArgs[3], 10, 64)
+ if err != nil {
+ return nilArgSlice, err
+ }
+
+ syscallOp, err := parseOperator(delimArgs[4])
+ if err != nil {
+ return nilArgSlice, err
+ }
+
+ argStruct := rspec.LinuxSeccompArg{
+ Index: uint(syscallIndex),
+ Value: syscallValue,
+ ValueTwo: syscallValueTwo,
+ Op: syscallOp,
+ }
+
+ argSlice := []rspec.LinuxSeccompArg{}
+ argSlice = append(argSlice, argStruct)
+ return argSlice, nil
+ }
+
+ return nilArgSlice, fmt.Errorf("incorrect number of arguments passed with syscall: %d", numberOfArgs)
+}
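+
+// Worked example (hypothetical input): delimArgs of
+// {"personality", "0", "8", "0", "EQ"} yields one LinuxSeccompArg with
+// Index 0, Value 8, ValueTwo 0, and Op rspec.OpEqualTo.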
+
+func parseOperator(operator string) (rspec.LinuxSeccompOperator, error) {
+ operators := map[string]rspec.LinuxSeccompOperator{
+ "NE": rspec.OpNotEqual,
+ "LT": rspec.OpLessThan,
+ "LE": rspec.OpLessEqual,
+ "EQ": rspec.OpEqualTo,
+ "GE": rspec.OpGreaterEqual,
+ "GT": rspec.OpGreaterThan,
+ "ME": rspec.OpMaskedEqual,
+ }
+ o, ok := operators[operator]
+ if !ok {
+ return "", fmt.Errorf("unrecognized operator: %s", operator)
+ }
+ return o, nil
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go
new file mode 100644
index 000000000..59537d49c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go
@@ -0,0 +1,52 @@
+package seccomp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// RemoveAction takes the argument string that was passed with the --remove flag,
+// parses it, and updates the Seccomp config accordingly
+func RemoveAction(arguments string, config *rspec.LinuxSeccomp) error {
+ if config == nil {
+ return fmt.Errorf("cannot remove action from nil Seccomp pointer")
+ }
+
+ syscallsToRemove := strings.Split(arguments, ",")
+
+ // Collect the rules to keep instead of deleting entries from the slice
+ // while ranging over it, which can skip adjacent matches.
+ var remaining []rspec.LinuxSyscall
+ for _, syscallStruct := range config.Syscalls {
+ if !reflect.DeepEqual(syscallsToRemove, syscallStruct.Names) {
+ remaining = append(remaining, syscallStruct)
+ }
+ }
+ config.Syscalls = remaining
+
+ return nil
+}
+
+// RemoveAllSeccompRules removes all seccomp syscall rules
+func RemoveAllSeccompRules(config *rspec.LinuxSeccomp) error {
+ if config == nil {
+ return fmt.Errorf("cannot remove rules from nil Seccomp pointer")
+ }
+ config.Syscalls = []rspec.LinuxSyscall{}
+ return nil
+}
+
+// RemoveAllMatchingRules will remove any syscall rules that match the specified action
+func RemoveAllMatchingRules(config *rspec.LinuxSeccomp, seccompAction rspec.LinuxSeccompAction) error {
+ if config == nil {
+ return fmt.Errorf("cannot remove action from nil Seccomp pointer")
+ }
+
+ // Filter in one pass rather than calling RemoveAction while ranging over
+ // config.Syscalls, which would mutate the slice mid-iteration.
+ var remaining []rspec.LinuxSyscall
+ for _, syscall := range config.Syscalls {
+ if !reflect.DeepEqual(syscall.Action, seccompAction) {
+ remaining = append(remaining, syscall)
+ }
+ }
+ config.Syscalls = remaining
+
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go
new file mode 100644
index 000000000..35b12cd65
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go
@@ -0,0 +1,577 @@
+package seccomp
+
+import (
+ "runtime"
+ "syscall"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func arches() []rspec.Arch {
+ native := runtime.GOARCH
+
+ switch native {
+ case "amd64":
+ return []rspec.Arch{rspec.ArchX86_64, rspec.ArchX86, rspec.ArchX32}
+ case "arm64":
+ return []rspec.Arch{rspec.ArchARM, rspec.ArchAARCH64}
+ case "mips64":
+ return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32}
+ case "mips64n32":
+ return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32}
+ case "mipsel64":
+ return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32}
+ case "mipsel64n32":
+ return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32}
+ case "s390x":
+ return []rspec.Arch{rspec.ArchS390, rspec.ArchS390X}
+ default:
+ return []rspec.Arch{}
+ }
+}
+
+// DefaultProfile defines the whitelist for the default seccomp profile.
+func DefaultProfile(rs *rspec.Spec) *rspec.LinuxSeccomp {
+
+ syscalls := []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "accept",
+ "accept4",
+ "access",
+ "alarm",
+ "bind",
+ "brk",
+ "capget",
+ "capset",
+ "chdir",
+ "chmod",
+ "chown",
+ "chown32",
+ "clock_getres",
+ "clock_gettime",
+ "clock_nanosleep",
+ "close",
+ "connect",
+ "copy_file_range",
+ "creat",
+ "dup",
+ "dup2",
+ "dup3",
+ "epoll_create",
+ "epoll_create1",
+ "epoll_ctl",
+ "epoll_ctl_old",
+ "epoll_pwait",
+ "epoll_wait",
+ "epoll_wait_old",
+ "eventfd",
+ "eventfd2",
+ "execve",
+ "execveat",
+ "exit",
+ "exit_group",
+ "faccessat",
+ "fadvise64",
+ "fadvise64_64",
+ "fallocate",
+ "fanotify_mark",
+ "fchdir",
+ "fchmod",
+ "fchmodat",
+ "fchown",
+ "fchown32",
+ "fchownat",
+ "fcntl",
+ "fcntl64",
+ "fdatasync",
+ "fgetxattr",
+ "flistxattr",
+ "flock",
+ "fork",
+ "fremovexattr",
+ "fsetxattr",
+ "fstat",
+ "fstat64",
+ "fstatat64",
+ "fstatfs",
+ "fstatfs64",
+ "fsync",
+ "ftruncate",
+ "ftruncate64",
+ "futex",
+ "futimesat",
+ "getcpu",
+ "getcwd",
+ "getdents",
+ "getdents64",
+ "getegid",
+ "getegid32",
+ "geteuid",
+ "geteuid32",
+ "getgid",
+ "getgid32",
+ "getgroups",
+ "getgroups32",
+ "getitimer",
+ "getpeername",
+ "getpgid",
+ "getpgrp",
+ "getpid",
+ "getppid",
+ "getpriority",
+ "getrandom",
+ "getresgid",
+ "getresgid32",
+ "getresuid",
+ "getresuid32",
+ "getrlimit",
+ "get_robust_list",
+ "getrusage",
+ "getsid",
+ "getsockname",
+ "getsockopt",
+ "get_thread_area",
+ "gettid",
+ "gettimeofday",
+ "getuid",
+ "getuid32",
+ "getxattr",
+ "inotify_add_watch",
+ "inotify_init",
+ "inotify_init1",
+ "inotify_rm_watch",
+ "io_cancel",
+ "ioctl",
+ "io_destroy",
+ "io_getevents",
+ "ioprio_get",
+ "ioprio_set",
+ "io_setup",
+ "io_submit",
+ "ipc",
+ "kill",
+ "lchown",
+ "lchown32",
+ "lgetxattr",
+ "link",
+ "linkat",
+ "listen",
+ "listxattr",
+ "llistxattr",
+ "_llseek",
+ "lremovexattr",
+ "lseek",
+ "lsetxattr",
+ "lstat",
+ "lstat64",
+ "madvise",
+ "memfd_create",
+ "mincore",
+ "mkdir",
+ "mkdirat",
+ "mknod",
+ "mknodat",
+ "mlock",
+ "mlock2",
+ "mlockall",
+ "mmap",
+ "mmap2",
+ "mprotect",
+ "mq_getsetattr",
+ "mq_notify",
+ "mq_open",
+ "mq_timedreceive",
+ "mq_timedsend",
+ "mq_unlink",
+ "mremap",
+ "msgctl",
+ "msgget",
+ "msgrcv",
+ "msgsnd",
+ "msync",
+ "munlock",
+ "munlockall",
+ "munmap",
+ "nanosleep",
+ "newfstatat",
+ "_newselect",
+ "open",
+ "openat",
+ "pause",
+ "pipe",
+ "pipe2",
+ "poll",
+ "ppoll",
+ "prctl",
+ "pread64",
+ "preadv",
+ "prlimit64",
+ "pselect6",
+ "pwrite64",
+ "pwritev",
+ "read",
+ "readahead",
+ "readlink",
+ "readlinkat",
+ "readv",
+ "recv",
+ "recvfrom",
+ "recvmmsg",
+ "recvmsg",
+ "remap_file_pages",
+ "removexattr",
+ "rename",
+ "renameat",
+ "renameat2",
+ "restart_syscall",
+ "rmdir",
+ "rt_sigaction",
+ "rt_sigpending",
+ "rt_sigprocmask",
+ "rt_sigqueueinfo",
+ "rt_sigreturn",
+ "rt_sigsuspend",
+ "rt_sigtimedwait",
+ "rt_tgsigqueueinfo",
+ "sched_getaffinity",
+ "sched_getattr",
+ "sched_getparam",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
+ "sched_getscheduler",
+ "sched_rr_get_interval",
+ "sched_setaffinity",
+ "sched_setattr",
+ "sched_setparam",
+ "sched_setscheduler",
+ "sched_yield",
+ "seccomp",
+ "select",
+ "semctl",
+ "semget",
+ "semop",
+ "semtimedop",
+ "send",
+ "sendfile",
+ "sendfile64",
+ "sendmmsg",
+ "sendmsg",
+ "sendto",
+ "setfsgid",
+ "setfsgid32",
+ "setfsuid",
+ "setfsuid32",
+ "setgid",
+ "setgid32",
+ "setgroups",
+ "setgroups32",
+ "setitimer",
+ "setpgid",
+ "setpriority",
+ "setregid",
+ "setregid32",
+ "setresgid",
+ "setresgid32",
+ "setresuid",
+ "setresuid32",
+ "setreuid",
+ "setreuid32",
+ "setrlimit",
+ "set_robust_list",
+ "setsid",
+ "setsockopt",
+ "set_thread_area",
+ "set_tid_address",
+ "setuid",
+ "setuid32",
+ "setxattr",
+ "shmat",
+ "shmctl",
+ "shmdt",
+ "shmget",
+ "shutdown",
+ "sigaltstack",
+ "signalfd",
+ "signalfd4",
+ "sigreturn",
+ "socket",
+ "socketcall",
+ "socketpair",
+ "splice",
+ "stat",
+ "stat64",
+ "statfs",
+ "statfs64",
+ "symlink",
+ "symlinkat",
+ "sync",
+ "sync_file_range",
+ "syncfs",
+ "sysinfo",
+ "syslog",
+ "tee",
+ "tgkill",
+ "time",
+ "timer_create",
+ "timer_delete",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
+ "timer_getoverrun",
+ "timer_gettime",
+ "timer_settime",
+ "times",
+ "tkill",
+ "truncate",
+ "truncate64",
+ "ugetrlimit",
+ "umask",
+ "uname",
+ "unlink",
+ "unlinkat",
+ "utime",
+ "utimensat",
+ "utimes",
+ "vfork",
+ "vmsplice",
+ "wait4",
+ "waitid",
+ "waitpid",
+ "write",
+ "writev",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ {
+ Names: []string{"personality"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{
+ {
+ Index: 0,
+ Value: 0x0,
+ Op: rspec.OpEqualTo,
+ },
+ {
+ Index: 0,
+ Value: 0x0008,
+ Op: rspec.OpEqualTo,
+ },
+ {
+ Index: 0,
+ Value: 0xffffffff,
+ Op: rspec.OpEqualTo,
+ },
+ },
+ },
+ }
+ var sysCloneFlagsIndex uint
+
+ capSysAdmin := false
+ caps := make(map[string]bool)
+
+ for _, cap := range rs.Process.Capabilities.Bounding {
+ caps[cap] = true
+ }
+ for _, cap := range rs.Process.Capabilities.Effective {
+ caps[cap] = true
+ }
+ for _, cap := range rs.Process.Capabilities.Inheritable {
+ caps[cap] = true
+ }
+ for _, cap := range rs.Process.Capabilities.Permitted {
+ caps[cap] = true
+ }
+ for _, cap := range rs.Process.Capabilities.Ambient {
+ caps[cap] = true
+ }
+
+ for cap := range caps {
+ switch cap {
+ case "CAP_DAC_READ_SEARCH":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"open_by_handle_at"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_ADMIN":
+ capSysAdmin = true
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "bpf",
+ "clone",
+ "fanotify_init",
+ "lookup_dcookie",
+ "mount",
+ "name_to_handle_at",
+ "perf_event_open",
+ "setdomainname",
+ "sethostname",
+ "setns",
+ "umount",
+ "umount2",
+ "unshare",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_BOOT":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"reboot"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_CHROOT":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"chroot"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_MODULE":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "delete_module",
+ "init_module",
+ "finit_module",
+ "query_module",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_PACCT":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"acct"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_PTRACE":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "kcmp",
+ "process_vm_readv",
+ "process_vm_writev",
+ "ptrace",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_RAWIO":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "iopl",
+ "ioperm",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_TIME":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "settimeofday",
+ "stime",
+ "adjtimex",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "CAP_SYS_TTY_CONFIG":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"vhangup"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ }
+ }
+
+ if !capSysAdmin {
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"clone"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{
+ {
+ Index: sysCloneFlagsIndex,
+ Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
+ ValueTwo: 0,
+ Op: rspec.OpMaskedEqual,
+ },
+ },
+ },
+ }...)
+
+ }
+
+ arch := runtime.GOARCH
+ switch arch {
+ case "arm", "arm64":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "breakpoint",
+ "cacheflush",
+ "set_tls",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "amd64", "x32":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"arch_prctl"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ fallthrough
+ case "x86":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{"modify_ldt"},
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ case "s390", "s390x":
+ syscalls = append(syscalls, []rspec.LinuxSyscall{
+ {
+ Names: []string{
+ "s390_pci_mmio_read",
+ "s390_pci_mmio_write",
+ "s390_runtime_instr",
+ },
+ Action: rspec.ActAllow,
+ Args: []rspec.LinuxSeccompArg{},
+ },
+ }...)
+ /* Flags parameter of the clone syscall is the 2nd (index 1) on s390;
+ note that the clone rule appended above still uses index 0 here. */
+ }
+
+ return &rspec.LinuxSeccomp{
+ DefaultAction: rspec.ActErrno,
+ Architectures: arches(),
+ Syscalls: syscalls,
+ }
+}
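+
+// Illustrative usage; a minimal sketch, assuming spec is an *rspec.Spec with
+// spec.Linux and spec.Process.Capabilities already populated:
+//
+// spec.Linux.Seccomp = DefaultProfile(spec)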
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go
new file mode 100644
index 000000000..dbf2aec1c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go
@@ -0,0 +1,140 @@
+package seccomp
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// decideCourseOfAction determines whether a new syscall rule should be
+// appended, should overwrite an existing rule, or should be ignored entirely.
+func decideCourseOfAction(newSyscall *rspec.LinuxSyscall, syscalls []rspec.LinuxSyscall) (string, error) {
+ ruleForSyscallAlreadyExists := false
+
+ var sliceOfDeterminedActions []string
+ for i, syscall := range syscalls {
+ if sameName(&syscall, newSyscall) {
+ ruleForSyscallAlreadyExists = true
+
+ if identical(newSyscall, &syscall) {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing)
+ }
+
+ if sameAction(newSyscall, &syscall) {
+ if bothHaveArgs(newSyscall, &syscall) {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
+ }
+ if onlyOneHasArgs(newSyscall, &syscall) {
+ if firstParamOnlyHasArgs(newSyscall, &syscall) {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
+ } else {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing)
+ }
+ }
+ }
+
+ if !sameAction(newSyscall, &syscall) {
+ if bothHaveArgs(newSyscall, &syscall) {
+ if sameArgs(newSyscall, &syscall) {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
+ }
+ if !sameArgs(newSyscall, &syscall) {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
+ }
+ }
+ if onlyOneHasArgs(newSyscall, &syscall) {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
+ }
+ if neitherHasArgs(newSyscall, &syscall) {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i))
+ }
+ }
+ }
+ }
+
+ if !ruleForSyscallAlreadyExists {
+ sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend)
+ }
+
+ // Nothing has highest priority
+ for _, determinedAction := range sliceOfDeterminedActions {
+ if determinedAction == nothing {
+ return determinedAction, nil
+ }
+ }
+
+ // Overwrite has second highest priority
+ for _, determinedAction := range sliceOfDeterminedActions {
+ if strings.Contains(determinedAction, seccompOverwrite) {
+ return determinedAction, nil
+ }
+ }
+
+ // Append has the lowest priority
+ for _, determinedAction := range sliceOfDeterminedActions {
+ if determinedAction == seccompAppend {
+ return determinedAction, nil
+ }
+ }
+
+ return "", fmt.Errorf("Trouble determining action: %s", sliceOfDeterminedActions)
+}
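+
+// Worked example (hypothetical values): a new rule whose Names match existing
+// rule 3 but whose action differs while the args are identical yields
+// "overwrite:3"; a syscall name with no existing rule yields "append".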
+
+func hasArguments(config *rspec.LinuxSyscall) bool {
+ nilSyscall := new(rspec.LinuxSyscall)
+ return !sameArgs(nilSyscall, config)
+}
+
+func identical(config1, config2 *rspec.LinuxSyscall) bool {
+ return reflect.DeepEqual(config1, config2)
+}
+
+func identicalExceptAction(config1, config2 *rspec.LinuxSyscall) bool {
+ samename := sameName(config1, config2)
+ sameAction := sameAction(config1, config2)
+ sameArgs := sameArgs(config1, config2)
+
+ return samename && !sameAction && sameArgs
+}
+
+func identicalExceptArgs(config1, config2 *rspec.LinuxSyscall) bool {
+ samename := sameName(config1, config2)
+ sameAction := sameAction(config1, config2)
+ sameArgs := sameArgs(config1, config2)
+
+ return samename && sameAction && !sameArgs
+}
+
+func sameName(config1, config2 *rspec.LinuxSyscall) bool {
+ return reflect.DeepEqual(config1.Names, config2.Names)
+}
+
+func sameAction(config1, config2 *rspec.LinuxSyscall) bool {
+ return config1.Action == config2.Action
+}
+
+func sameArgs(config1, config2 *rspec.LinuxSyscall) bool {
+ return reflect.DeepEqual(config1.Args, config2.Args)
+}
+
+func bothHaveArgs(config1, config2 *rspec.LinuxSyscall) bool {
+ return hasArguments(config1) && hasArguments(config2)
+}
+
+func onlyOneHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
+ conf1 := hasArguments(config1)
+ conf2 := hasArguments(config2)
+
+ return (conf1 && !conf2) || (!conf1 && conf2)
+}
+
+func neitherHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
+ return !hasArguments(config1) && !hasArguments(config2)
+}
+
+func firstParamOnlyHasArgs(config1, config2 *rspec.LinuxSyscall) bool {
+ return !hasArguments(config1) && hasArguments(config2)
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/spec.go b/vendor/github.com/opencontainers/runtime-tools/generate/spec.go
new file mode 100644
index 000000000..519d25448
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/spec.go
@@ -0,0 +1,109 @@
+package generate
+
+import (
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func (g *Generator) initSpec() {
+ if g.spec == nil {
+ g.spec = &rspec.Spec{}
+ }
+}
+
+func (g *Generator) initSpecProcess() {
+ g.initSpec()
+ if g.spec.Process == nil {
+ g.spec.Process = &rspec.Process{}
+ }
+}
+
+func (g *Generator) initSpecProcessConsoleSize() {
+ g.initSpecProcess()
+ if g.spec.Process.ConsoleSize == nil {
+ g.spec.Process.ConsoleSize = &rspec.Box{}
+ }
+}
+
+func (g *Generator) initSpecProcessCapabilities() {
+ g.initSpecProcess()
+ if g.spec.Process.Capabilities == nil {
+ g.spec.Process.Capabilities = &rspec.LinuxCapabilities{}
+ }
+}
+
+func (g *Generator) initSpecRoot() {
+ g.initSpec()
+ if g.spec.Root == nil {
+ g.spec.Root = &rspec.Root{}
+ }
+}
+
+func (g *Generator) initSpecAnnotations() {
+ g.initSpec()
+ if g.spec.Annotations == nil {
+ g.spec.Annotations = make(map[string]string)
+ }
+}
+
+func (g *Generator) initSpecHooks() {
+ g.initSpec()
+ if g.spec.Hooks == nil {
+ g.spec.Hooks = &rspec.Hooks{}
+ }
+}
+
+func (g *Generator) initSpecLinux() {
+ g.initSpec()
+ if g.spec.Linux == nil {
+ g.spec.Linux = &rspec.Linux{}
+ }
+}
+
+func (g *Generator) initSpecLinuxSysctl() {
+ g.initSpecLinux()
+ if g.spec.Linux.Sysctl == nil {
+ g.spec.Linux.Sysctl = make(map[string]string)
+ }
+}
+
+func (g *Generator) initSpecLinuxSeccomp() {
+ g.initSpecLinux()
+ if g.spec.Linux.Seccomp == nil {
+ g.spec.Linux.Seccomp = &rspec.LinuxSeccomp{}
+ }
+}
+
+func (g *Generator) initSpecLinuxResources() {
+ g.initSpecLinux()
+ if g.spec.Linux.Resources == nil {
+ g.spec.Linux.Resources = &rspec.LinuxResources{}
+ }
+}
+
+func (g *Generator) initSpecLinuxResourcesCPU() {
+ g.initSpecLinuxResources()
+ if g.spec.Linux.Resources.CPU == nil {
+ g.spec.Linux.Resources.CPU = &rspec.LinuxCPU{}
+ }
+}
+
+func (g *Generator) initSpecLinuxResourcesMemory() {
+ g.initSpecLinuxResources()
+ if g.spec.Linux.Resources.Memory == nil {
+ g.spec.Linux.Resources.Memory = &rspec.LinuxMemory{}
+ }
+}
+
+func (g *Generator) initSpecLinuxResourcesNetwork() {
+ g.initSpecLinuxResources()
+ if g.spec.Linux.Resources.Network == nil {
+ g.spec.Linux.Resources.Network = &rspec.LinuxNetwork{}
+ }
+}
+
+func (g *Generator) initSpecLinuxResourcesPids() {
+ g.initSpecLinuxResources()
+ if g.spec.Linux.Resources.Pids == nil {
+ g.spec.Linux.Resources.Pids = &rspec.LinuxPids{}
+ }
+}
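+
+// All of the initSpec* helpers above are idempotent and each calls its parent
+// initializer first, so a setter needs only a single call to materialize the
+// whole chain. For example, initSpecLinuxResourcesPids() allocates the Spec,
+// Linux, Resources, and Pids structs, in that order, wherever one is still nil.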
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/error.go b/vendor/github.com/opencontainers/runtime-tools/specerror/error.go
new file mode 100644
index 000000000..c75bb6b14
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/error.go
@@ -0,0 +1,170 @@
+// Package specerror implements runtime-spec-specific tooling for
+// tracking RFC 2119 violations.
+package specerror
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-multierror"
+ rfc2119 "github.com/opencontainers/runtime-tools/error"
+)
+
+const referenceTemplate = "https://github.com/opencontainers/runtime-spec/blob/v%s/%s"
+
+// Code represents the spec violation, enumerating both
+// configuration violations and runtime violations.
+type Code int
+
+const (
+ // NonError represents that an input is not an error
+ NonError Code = iota
+ // NonRFCError represents that an error is not an RFC 2119 error
+ NonRFCError
+
+ // ConfigFileExistence represents the error code of 'config.json' existence test
+ ConfigFileExistence
+ // ArtifactsInSingleDir represents the error code of artifacts place test
+ ArtifactsInSingleDir
+
+ // SpecVersion represents the error code of the specification version test
+ SpecVersion
+
+ // RootOnNonHyperV represents the error code of root setting test on non hyper-v containers
+ RootOnNonHyperV
+ // RootOnHyperV represents the error code of root setting test on hyper-v containers
+ RootOnHyperV
+ // PathFormatOnWindows represents the error code of the path format test on Windows
+ PathFormatOnWindows
+ // PathName represents the error code of the path name test
+ PathName
+ // PathExistence represents the error code of the path existence test
+ PathExistence
+ // ReadonlyFilesystem represents the error code of readonly test
+ ReadonlyFilesystem
+ // ReadonlyOnWindows represents the error code of readonly setting test on Windows
+ ReadonlyOnWindows
+
+ // DefaultFilesystems represents the error code of default filesystems test
+ DefaultFilesystems
+
+ // CreateWithID represents the error code of the 'create' lifecycle test with 'id' provided
+ CreateWithID
+ // CreateWithUniqueID represents the error code of the 'create' lifecycle test with a unique 'id' provided
+ CreateWithUniqueID
+ // CreateNewContainer represents the error code of the 'create' lifecycle test that creates a new container
+ CreateNewContainer
+)
+
+type errorTemplate struct {
+ Level rfc2119.Level
+ Reference func(version string) (reference string, err error)
+}
+
+// Error represents a runtime-spec violation.
+type Error struct {
+ // Err holds the RFC 2119 violation.
+ Err rfc2119.Error
+
+ // Code is the matchable error code for this violation.
+ Code Code
+}
+
+var (
+ containerFormatRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "bundle.md#container-format"), nil
+ }
+ specVersionRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#specification-version"), nil
+ }
+ rootRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#root"), nil
+ }
+ defaultFSRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-filesystems"), nil
+ }
+ runtimeCreateRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#create"), nil
+ }
+)
+
+var ociErrors = map[Code]errorTemplate{
+ // Bundle.md
+ // Container Format
+ ConfigFileExistence: {Level: rfc2119.Must, Reference: containerFormatRef},
+ ArtifactsInSingleDir: {Level: rfc2119.Must, Reference: containerFormatRef},
+
+ // Config.md
+ // Specification Version
+ SpecVersion: {Level: rfc2119.Must, Reference: specVersionRef},
+ // Root
+ RootOnNonHyperV: {Level: rfc2119.Required, Reference: rootRef},
+ RootOnHyperV: {Level: rfc2119.Must, Reference: rootRef},
+ // TODO: add tests for 'PathFormatOnWindows'
+ PathFormatOnWindows: {Level: rfc2119.Must, Reference: rootRef},
+ PathName: {Level: rfc2119.Should, Reference: rootRef},
+ PathExistence: {Level: rfc2119.Must, Reference: rootRef},
+ ReadonlyFilesystem: {Level: rfc2119.Must, Reference: rootRef},
+ ReadonlyOnWindows: {Level: rfc2119.Must, Reference: rootRef},
+
+ // Config-Linux.md
+ // Default Filesystems
+ DefaultFilesystems: {Level: rfc2119.Should, Reference: defaultFSRef},
+
+ // Runtime.md
+ // Create
+ CreateWithID: {Level: rfc2119.Must, Reference: runtimeCreateRef},
+ CreateWithUniqueID: {Level: rfc2119.Must, Reference: runtimeCreateRef},
+ CreateNewContainer: {Level: rfc2119.Must, Reference: runtimeCreateRef},
+}
+
+// Error returns the error message with specification reference.
+func (err *Error) Error() string {
+ return err.Err.Error()
+}
+
+// NewError creates an Error referencing a spec violation. The error
+// can be cast to an *Error for extracting structured information
+// about the level of the violation and a reference to the violated
+// spec condition.
+//
+// A version string (for the version of the spec that was violated)
+// must be set to get a working URL.
+func NewError(code Code, err error, version string) error {
+ template := ociErrors[code]
+ reference, err2 := template.Reference(version)
+ if err2 != nil {
+ return err2
+ }
+ return &Error{
+ Err: rfc2119.Error{
+ Level: template.Level,
+ Reference: reference,
+ Err: err,
+ },
+ Code: code,
+ }
+}
+
+// FindError searches a source error (a multierror) for the given code and
+// returns that code if found.
+// If the source error is nil or empty, it returns NonError.
+// If the source error is not a multierror, it returns NonRFCError.
+func FindError(err error, code Code) Code {
+ if err == nil {
+ return NonError
+ }
+
+ if merr, ok := err.(*multierror.Error); ok {
+ if merr.ErrorOrNil() == nil {
+ return NonError
+ }
+ for _, e := range merr.Errors {
+ if rfcErr, ok := e.(*Error); ok {
+ if rfcErr.Code == code {
+ return code
+ }
+ }
+ }
+ }
+ return NonRFCError
+}
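+
+// Illustrative usage; a minimal sketch, assuming errs aggregates validator
+// output via multierror:
+//
+// if specerror.FindError(errs, specerror.SpecVersion) == specerror.SpecVersion {
+//         fmt.Println("bundle targets an unsupported runtime-spec version")
+// }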
diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/validate.go b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go
new file mode 100644
index 000000000..bbdb29c60
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go
@@ -0,0 +1,1050 @@
+package validate
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "syscall"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/blang/semver"
+ "github.com/hashicorp/go-multierror"
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "github.com/syndtr/gocapability/capability"
+
+ "github.com/opencontainers/runtime-tools/specerror"
+)
+
+const specConfig = "config.json"
+
+var (
+ defaultRlimits = []string{
+ "RLIMIT_AS",
+ "RLIMIT_CORE",
+ "RLIMIT_CPU",
+ "RLIMIT_DATA",
+ "RLIMIT_FSIZE",
+ "RLIMIT_LOCKS",
+ "RLIMIT_MEMLOCK",
+ "RLIMIT_MSGQUEUE",
+ "RLIMIT_NICE",
+ "RLIMIT_NOFILE",
+ "RLIMIT_NPROC",
+ "RLIMIT_RSS",
+ "RLIMIT_RTPRIO",
+ "RLIMIT_RTTIME",
+ "RLIMIT_SIGPENDING",
+ "RLIMIT_STACK",
+ }
+)
+
+// Validator represents a validator for runtime bundle
+type Validator struct {
+ spec *rspec.Spec
+ bundlePath string
+ HostSpecific bool
+ platform string
+}
+
+// NewValidator creates a Validator
+func NewValidator(spec *rspec.Spec, bundlePath string, hostSpecific bool, platform string) Validator {
+ if hostSpecific && platform != runtime.GOOS {
+ platform = runtime.GOOS
+ }
+ return Validator{
+ spec: spec,
+ bundlePath: bundlePath,
+ HostSpecific: hostSpecific,
+ platform: platform,
+ }
+}
+
+// NewValidatorFromPath creates a Validator with specified bundle path
+func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string) (Validator, error) {
+ if hostSpecific && platform != runtime.GOOS {
+ platform = runtime.GOOS
+ }
+ if bundlePath == "" {
+ return Validator{}, fmt.Errorf("bundle path shouldn't be empty")
+ }
+
+ if _, err := os.Stat(bundlePath); err != nil {
+ return Validator{}, err
+ }
+
+ configPath := filepath.Join(bundlePath, specConfig)
+ content, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return Validator{}, specerror.NewError(specerror.ConfigFileExistence, err, rspec.Version)
+ }
+ if !utf8.Valid(content) {
+ return Validator{}, fmt.Errorf("%q is not encoded in UTF-8", configPath)
+ }
+ var spec rspec.Spec
+ if err = json.Unmarshal(content, &spec); err != nil {
+ return Validator{}, err
+ }
+
+ return NewValidator(&spec, bundlePath, hostSpecific, platform), nil
+}
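+
+// Illustrative usage; a minimal sketch (the bundle path is hypothetical):
+//
+// v, err := validate.NewValidatorFromPath("/var/lib/bundles/app", true, runtime.GOOS)
+// if err != nil {
+//         log.Fatal(err)
+// }
+// errs := v.CheckAll() // aggregated multierror from the Check* methods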
+
+// CheckAll checks all parts of the runtime bundle
+func (v *Validator) CheckAll() (errs error) {
+ errs = multierror.Append(errs, v.CheckPlatform())
+ errs = multierror.Append(errs, v.CheckRoot())
+ errs = multierror.Append(errs, v.CheckMandatoryFields())
+ errs = multierror.Append(errs, v.CheckSemVer())
+ errs = multierror.Append(errs, v.CheckMounts())
+ errs = multierror.Append(errs, v.CheckProcess())
+ errs = multierror.Append(errs, v.CheckHooks())
+ errs = multierror.Append(errs, v.CheckLinux())
+
+ return
+}
+
+// CheckRoot checks the status of v.spec.Root
+func (v *Validator) CheckRoot() (errs error) {
+ logrus.Debugf("check root")
+
+ if v.platform == "windows" && v.spec.Windows != nil && v.spec.Windows.HyperV != nil {
+ if v.spec.Root != nil {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.RootOnHyperV, fmt.Errorf("for Hyper-V containers, Root must not be set"), rspec.Version))
+ return
+ }
+ return
+ } else if v.spec.Root == nil {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.RootOnNonHyperV, fmt.Errorf("for non-Hyper-V containers, Root must be set"), rspec.Version))
+ return
+ }
+
+ absBundlePath, err := filepath.Abs(v.bundlePath)
+ if err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("unable to convert %q to an absolute path", v.bundlePath))
+ return
+ }
+
+ if filepath.Base(v.spec.Root.Path) != "rootfs" {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.PathName, fmt.Errorf("path name should be the conventional 'rootfs'"), rspec.Version))
+ }
+
+ var rootfsPath string
+ var absRootPath string
+ if filepath.IsAbs(v.spec.Root.Path) {
+ rootfsPath = v.spec.Root.Path
+ absRootPath = filepath.Clean(rootfsPath)
+ } else {
+ var err error
+ rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
+ absRootPath, err = filepath.Abs(rootfsPath)
+ if err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("unable to convert %q to an absolute path", rootfsPath))
+ return
+ }
+ }
+
+ if fi, err := os.Stat(rootfsPath); err != nil {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.PathExistence, fmt.Errorf("cannot find the root path %q", rootfsPath), rspec.Version))
+ } else if !fi.IsDir() {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.PathExistence, fmt.Errorf("root.path %q is not a directory", rootfsPath), rspec.Version))
+ }
+
+ rootParent := filepath.Dir(absRootPath)
+ if absRootPath == string(filepath.Separator) || rootParent != absBundlePath {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.ArtifactsInSingleDir, fmt.Errorf("root.path is %q, but it MUST be a child of %q", v.spec.Root.Path, absBundlePath), rspec.Version))
+ }
+
+ if v.platform == "windows" {
+ if v.spec.Root.Readonly {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.ReadonlyOnWindows, fmt.Errorf("root.readonly field MUST be omitted or false when target platform is windows"), rspec.Version))
+ }
+ }
+
+ return
+}
+
+// CheckSemVer checks v.spec.Version
+func (v *Validator) CheckSemVer() (errs error) {
+ logrus.Debugf("check semver")
+
+ version := v.spec.Version
+ _, err := semver.Parse(version)
+ if err != nil {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.SpecVersion, fmt.Errorf("%q is not valid SemVer: %s", version, err.Error()), rspec.Version))
+ }
+ if version != rspec.Version {
+ errs = multierror.Append(errs, fmt.Errorf("validate currently only handles version %s, but the supplied configuration targets %s", rspec.Version, version))
+ }
+
+ return
+}
+
+// CheckHooks checks v.spec.Hooks
+func (v *Validator) CheckHooks() (errs error) {
+ logrus.Debugf("check hooks")
+
+ if v.spec.Hooks != nil {
+ errs = multierror.Append(errs, checkEventHooks("pre-start", v.spec.Hooks.Prestart, v.HostSpecific))
+ errs = multierror.Append(errs, checkEventHooks("post-start", v.spec.Hooks.Poststart, v.HostSpecific))
+ errs = multierror.Append(errs, checkEventHooks("post-stop", v.spec.Hooks.Poststop, v.HostSpecific))
+ }
+
+ return
+}
+
+func checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (errs error) {
+ for _, hook := range hooks {
+ if !filepath.IsAbs(hook.Path) {
+			errs = multierror.Append(errs, fmt.Errorf("the %s hook %v is not an absolute path", hookType, hook.Path))
+ }
+
+ if hostSpecific {
+		fi, err := os.Stat(hook.Path)
+		if err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("cannot find %s hook: %v", hookType, hook.Path))
+		} else if fi.Mode()&0111 == 0 {
+			// else-if guards the Mode() call: fi is nil when Stat fails
+			errs = multierror.Append(errs, fmt.Errorf("the %s hook %v is not executable", hookType, hook.Path))
+		}
+ }
+
+ for _, env := range hook.Env {
+ if !envValid(env) {
+				errs = multierror.Append(errs, fmt.Errorf("env %q for hook %v is not in the form 'key=value'", env, hook.Path))
+ }
+ }
+ }
+
+ return
+}
+
+// CheckProcess checks v.spec.Process
+func (v *Validator) CheckProcess() (errs error) {
+ logrus.Debugf("check process")
+
+ if v.spec.Process == nil {
+ return
+ }
+
+ process := v.spec.Process
+ if !filepath.IsAbs(process.Cwd) {
+ errs = multierror.Append(errs, fmt.Errorf("cwd %q is not an absolute path", process.Cwd))
+ }
+
+ for _, env := range process.Env {
+ if !envValid(env) {
+ errs = multierror.Append(errs, fmt.Errorf("env %q should be in the form of 'key=value'. The left hand side must consist solely of letters, digits, and underscores '_'", env))
+ }
+ }
+
+ if len(process.Args) == 0 {
+ errs = multierror.Append(errs, fmt.Errorf("args must not be empty"))
+ } else {
+ if filepath.IsAbs(process.Args[0]) {
+ var rootfsPath string
+ if filepath.IsAbs(v.spec.Root.Path) {
+ rootfsPath = v.spec.Root.Path
+ } else {
+ rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
+ }
+ absPath := filepath.Join(rootfsPath, process.Args[0])
+ fileinfo, err := os.Stat(absPath)
+ if os.IsNotExist(err) {
+				logrus.Warnf("executable %q is not currently present in the rootfs", process.Args[0])
+ } else if err != nil {
+ errs = multierror.Append(errs, err)
+ } else {
+ m := fileinfo.Mode()
+ if m.IsDir() || m&0111 == 0 {
+ errs = multierror.Append(errs, fmt.Errorf("arg %q is not executable", process.Args[0]))
+ }
+ }
+ }
+ }
+
+ if v.spec.Process.Capabilities != nil {
+ errs = multierror.Append(errs, v.CheckCapabilities())
+ }
+ errs = multierror.Append(errs, v.CheckRlimits())
+
+ if v.platform == "linux" {
+ if len(process.ApparmorProfile) > 0 {
+ profilePath := filepath.Join(v.bundlePath, v.spec.Root.Path, "/etc/apparmor.d", process.ApparmorProfile)
+ _, err := os.Stat(profilePath)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ }
+ }
+
+ return
+}
+
+// CheckCapabilities checks v.spec.Process.Capabilities
+func (v *Validator) CheckCapabilities() (errs error) {
+ process := v.spec.Process
+ if v.platform == "linux" {
+ var effective, permitted, inheritable, ambient bool
+ caps := make(map[string][]string)
+
+ for _, cap := range process.Capabilities.Bounding {
+ caps[cap] = append(caps[cap], "bounding")
+ }
+ for _, cap := range process.Capabilities.Effective {
+ caps[cap] = append(caps[cap], "effective")
+ }
+ for _, cap := range process.Capabilities.Inheritable {
+ caps[cap] = append(caps[cap], "inheritable")
+ }
+ for _, cap := range process.Capabilities.Permitted {
+ caps[cap] = append(caps[cap], "permitted")
+ }
+ for _, cap := range process.Capabilities.Ambient {
+ caps[cap] = append(caps[cap], "ambient")
+ }
+
+ for capability, owns := range caps {
+ if err := CapValid(capability, v.HostSpecific); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("capability %q is not valid, man capabilities(7)", capability))
+ }
+
+ effective, permitted, ambient, inheritable = false, false, false, false
+ for _, set := range owns {
+ if set == "effective" {
+ effective = true
+ continue
+ }
+ if set == "inheritable" {
+ inheritable = true
+ continue
+ }
+ if set == "permitted" {
+ permitted = true
+ continue
+ }
+ if set == "ambient" {
+ ambient = true
+ continue
+ }
+ }
+ if effective && !permitted {
+ errs = multierror.Append(errs, fmt.Errorf("effective capability %q is not allowed, as it's not permitted", capability))
+ }
+ if ambient && !(effective && inheritable) {
+				errs = multierror.Append(errs, fmt.Errorf("ambient capability %q is not allowed, as it's not both effective and inheritable", capability))
+ }
+ }
+ } else {
+ logrus.Warnf("process.capabilities validation not yet implemented for OS %q", v.platform)
+ }
+
+ return
+}
+
+// CheckRlimits checks v.spec.Process.Rlimits
+func (v *Validator) CheckRlimits() (errs error) {
+ process := v.spec.Process
+ for index, rlimit := range process.Rlimits {
+ for i := index + 1; i < len(process.Rlimits); i++ {
+ if process.Rlimits[index].Type == process.Rlimits[i].Type {
+				errs = multierror.Append(errs, fmt.Errorf("rlimits must not contain duplicate entries for type %q", process.Rlimits[index].Type))
+ }
+ }
+ errs = multierror.Append(errs, v.rlimitValid(rlimit))
+ }
+
+ return
+}
+
+func supportedMountTypes(OS string, hostSpecific bool) (map[string]bool, error) {
+ supportedTypes := make(map[string]bool)
+
+ if OS != "linux" && OS != "windows" {
+		logrus.Warnf("mount type validation is not supported on %v", OS)
+ return nil, nil
+ } else if OS == "windows" {
+ supportedTypes["ntfs"] = true
+ return supportedTypes, nil
+ }
+
+ if hostSpecific {
+ f, err := os.Open("/proc/filesystems")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
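+		// Each /proc/filesystems line is either "nodev\t<fstype>" for a
+		// virtual filesystem or "\t<fstype>" for a block-backed one, so
+		// when a line splits on a tab the type name is the second column.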
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return supportedTypes, err
+ }
+
+ text := s.Text()
+ parts := strings.Split(text, "\t")
+ if len(parts) > 1 {
+ supportedTypes[parts[1]] = true
+ } else {
+ supportedTypes[parts[0]] = true
+ }
+ }
+
+ supportedTypes["bind"] = true
+
+ return supportedTypes, nil
+ }
+ logrus.Warn("Checking linux mount types without --host-specific is not supported yet")
+ return nil, nil
+}
+
+// CheckMounts checks v.spec.Mounts
+func (v *Validator) CheckMounts() (errs error) {
+ logrus.Debugf("check mounts")
+
+ supportedTypes, err := supportedMountTypes(v.platform, v.HostSpecific)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ return
+ }
+
+ for i, mountA := range v.spec.Mounts {
+ if supportedTypes != nil && !supportedTypes[mountA.Type] {
+ errs = multierror.Append(errs, fmt.Errorf("unsupported mount type %q", mountA.Type))
+ }
+ if v.platform == "windows" {
+ if err := pathValid(v.platform, mountA.Destination); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ if err := pathValid(v.platform, mountA.Source); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ } else {
+ if err := pathValid(v.platform, mountA.Destination); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ }
+ for j, mountB := range v.spec.Mounts {
+ if i == j {
+ continue
+ }
+			// whether mountB.Destination is nested within mountA.Destination
+ nested, err := nestedValid(v.platform, mountA.Destination, mountB.Destination)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ continue
+ }
+ if nested {
+ if v.platform == "windows" && i < j {
+ errs = multierror.Append(errs, fmt.Errorf("on Windows, %v nested within %v is forbidden", mountB.Destination, mountA.Destination))
+ }
+ if i > j {
+ logrus.Warnf("%v will be covered by %v", mountB.Destination, mountA.Destination)
+ }
+ }
+ }
+ }
+
+ return
+}
+
+// CheckPlatform checks v.platform
+func (v *Validator) CheckPlatform() (errs error) {
+ logrus.Debugf("check platform")
+
+ if v.platform != "linux" && v.platform != "solaris" && v.platform != "windows" {
+ errs = multierror.Append(errs, fmt.Errorf("platform %q is not supported", v.platform))
+ return
+ }
+
+ if v.platform == "windows" {
+ if v.spec.Windows == nil {
+			errs = multierror.Append(errs, errors.New("'windows' MUST be set when platform is 'windows'"))
+ }
+ }
+
+ return
+}
+
+// CheckLinux checks v.spec.Linux
+func (v *Validator) CheckLinux() (errs error) {
+ logrus.Debugf("check linux")
+
+ if v.spec.Linux == nil {
+ return
+ }
+
+ var nsTypeList = map[rspec.LinuxNamespaceType]struct {
+ num int
+ newExist bool
+ }{
+ rspec.PIDNamespace: {0, false},
+ rspec.NetworkNamespace: {0, false},
+ rspec.MountNamespace: {0, false},
+ rspec.IPCNamespace: {0, false},
+ rspec.UTSNamespace: {0, false},
+ rspec.UserNamespace: {0, false},
+ rspec.CgroupNamespace: {0, false},
+ }
+
+ for index := 0; index < len(v.spec.Linux.Namespaces); index++ {
+ ns := v.spec.Linux.Namespaces[index]
+ if !namespaceValid(ns) {
+ errs = multierror.Append(errs, fmt.Errorf("namespace %v is invalid", ns))
+ }
+
+ tmpItem := nsTypeList[ns.Type]
+ tmpItem.num = tmpItem.num + 1
+ if tmpItem.num > 1 {
+ errs = multierror.Append(errs, fmt.Errorf("duplicated namespace %q", ns.Type))
+ }
+
+ if len(ns.Path) == 0 {
+ tmpItem.newExist = true
+ }
+ nsTypeList[ns.Type] = tmpItem
+ }
+
+ if (len(v.spec.Linux.UIDMappings) > 0 || len(v.spec.Linux.GIDMappings) > 0) && !nsTypeList[rspec.UserNamespace].newExist {
+		errs = multierror.Append(errs, errors.New("UID/GID mappings require a new user namespace to be specified as well"))
+ } else if len(v.spec.Linux.UIDMappings) > 5 {
+ errs = multierror.Append(errs, errors.New("only 5 UID mappings are allowed (linux kernel restriction)"))
+ } else if len(v.spec.Linux.GIDMappings) > 5 {
+ errs = multierror.Append(errs, errors.New("only 5 GID mappings are allowed (linux kernel restriction)"))
+ }
+
+ for k := range v.spec.Linux.Sysctl {
+ if strings.HasPrefix(k, "net.") && !nsTypeList[rspec.NetworkNamespace].newExist {
+ errs = multierror.Append(errs, fmt.Errorf("sysctl %v requires a new Network namespace to be specified as well", k))
+ }
+ if strings.HasPrefix(k, "fs.mqueue.") {
+ if !nsTypeList[rspec.MountNamespace].newExist || !nsTypeList[rspec.IPCNamespace].newExist {
+ errs = multierror.Append(errs, fmt.Errorf("sysctl %v requires a new IPC namespace and Mount namespace to be specified as well", k))
+ }
+ }
+ }
+
+ if v.platform == "linux" && !nsTypeList[rspec.UTSNamespace].newExist && v.spec.Hostname != "" {
+ errs = multierror.Append(errs, fmt.Errorf("on Linux, hostname requires a new UTS namespace to be specified as well"))
+ }
+
+ // Linux devices validation
+ devList := make(map[string]bool)
+ devTypeList := make(map[string]bool)
+ for index := 0; index < len(v.spec.Linux.Devices); index++ {
+ device := v.spec.Linux.Devices[index]
+ if !deviceValid(device) {
+ errs = multierror.Append(errs, fmt.Errorf("device %v is invalid", device))
+ }
+
+ if _, exists := devList[device.Path]; exists {
+ errs = multierror.Append(errs, fmt.Errorf("device %s is duplicated", device.Path))
+ } else {
+ var rootfsPath string
+ if filepath.IsAbs(v.spec.Root.Path) {
+ rootfsPath = v.spec.Root.Path
+ } else {
+ rootfsPath = filepath.Join(v.bundlePath, v.spec.Root.Path)
+ }
+ absPath := filepath.Join(rootfsPath, device.Path)
+ fi, err := os.Stat(absPath)
+ if os.IsNotExist(err) {
+ devList[device.Path] = true
+ } else if err != nil {
+ errs = multierror.Append(errs, err)
+ } else {
+ fStat, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ errs = multierror.Append(errs, fmt.Errorf("cannot determine state for device %s", device.Path))
+ continue
+ }
+ var devType string
+ switch fStat.Mode & syscall.S_IFMT {
+ case syscall.S_IFCHR:
+ devType = "c"
+ case syscall.S_IFBLK:
+ devType = "b"
+ case syscall.S_IFIFO:
+ devType = "p"
+ default:
+ devType = "unmatched"
+ }
+				// "u" is accepted as a synonym for "c", so an existing
+				// char node must not be flagged when the spec says "u"
+				if devType != device.Type && !(devType == "c" && device.Type == "u") {
+ errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ continue
+ }
+ if devType != "p" {
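+					// Rdev packs the device number in the historical
+					// 32-bit dev_t layout; these shifts mirror the usual
+					// glibc major()/minor() decoding.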
+ dev := fStat.Rdev
+ major := (dev >> 8) & 0xfff
+ minor := (dev & 0xff) | ((dev >> 12) & 0xfff00)
+ if int64(major) != device.Major || int64(minor) != device.Minor {
+ errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ continue
+ }
+ }
+ if device.FileMode != nil {
+ expectedPerm := *device.FileMode & os.ModePerm
+ actualPerm := fi.Mode() & os.ModePerm
+ if expectedPerm != actualPerm {
+ errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ continue
+ }
+ }
+ if device.UID != nil {
+ if *device.UID != fStat.Uid {
+ errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ continue
+ }
+ }
+ if device.GID != nil {
+ if *device.GID != fStat.Gid {
+ errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ continue
+ }
+ }
+ }
+ }
+
+ // unify u->c when comparing, they are synonyms
+ var devID string
+ if device.Type == "u" {
+ devID = fmt.Sprintf("%s:%d:%d", "c", device.Major, device.Minor)
+ } else {
+ devID = fmt.Sprintf("%s:%d:%d", device.Type, device.Major, device.Minor)
+ }
+
+ if _, exists := devTypeList[devID]; exists {
+			logrus.Warnf("duplicate linux device entry: type %s, major %d, minor %d", device.Type, device.Major, device.Minor)
+ } else {
+ devTypeList[devID] = true
+ }
+ }
+
+ if v.spec.Linux.Resources != nil {
+ errs = multierror.Append(errs, v.CheckLinuxResources())
+ }
+
+ if v.spec.Linux.Seccomp != nil {
+ errs = multierror.Append(errs, v.CheckSeccomp())
+ }
+
+ switch v.spec.Linux.RootfsPropagation {
+ case "":
+ case "private":
+ case "rprivate":
+ case "slave":
+ case "rslave":
+ case "shared":
+ case "rshared":
+ case "unbindable":
+ case "runbindable":
+ default:
+ errs = multierror.Append(errs, errors.New("rootfsPropagation must be empty or one of \"private|rprivate|slave|rslave|shared|rshared|unbindable|runbindable\""))
+ }
+
+ for _, maskedPath := range v.spec.Linux.MaskedPaths {
+ if !strings.HasPrefix(maskedPath, "/") {
+ errs = multierror.Append(errs, fmt.Errorf("maskedPath %v is not an absolute path", maskedPath))
+ }
+ }
+
+ for _, readonlyPath := range v.spec.Linux.ReadonlyPaths {
+ if !strings.HasPrefix(readonlyPath, "/") {
+ errs = multierror.Append(errs, fmt.Errorf("readonlyPath %v is not an absolute path", readonlyPath))
+ }
+ }
+
+ return
+}
+
+// CheckLinuxResources checks v.spec.Linux.Resources
+func (v *Validator) CheckLinuxResources() (errs error) {
+ logrus.Debugf("check linux resources")
+
+ r := v.spec.Linux.Resources
+ if r.Memory != nil {
+ if r.Memory.Limit != nil && r.Memory.Swap != nil && uint64(*r.Memory.Limit) > uint64(*r.Memory.Swap) {
+			errs = multierror.Append(errs, fmt.Errorf("memory+swap limit must be greater than or equal to the memory limit"))
+ }
+ if r.Memory.Limit != nil && r.Memory.Reservation != nil && uint64(*r.Memory.Reservation) > uint64(*r.Memory.Limit) {
+			errs = multierror.Append(errs, fmt.Errorf("memory limit must be greater than or equal to the memory reservation"))
+ }
+ }
+ if r.Network != nil && v.HostSpecific {
+ var exist bool
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ return
+ }
+ for _, prio := range r.Network.Priorities {
+ exist = false
+ for _, ni := range interfaces {
+ if prio.Name == ni.Name {
+ exist = true
+ break
+ }
+ }
+ if !exist {
+				errs = multierror.Append(errs, fmt.Errorf("network interface %s does not currently exist", prio.Name))
+ }
+ }
+ }
+ for index := 0; index < len(r.Devices); index++ {
+ switch r.Devices[index].Type {
+ case "a", "b", "c":
+ default:
+			errs = multierror.Append(errs, fmt.Errorf("device type %q is invalid", r.Devices[index].Type))
+ }
+
+ access := []byte(r.Devices[index].Access)
+ for i := 0; i < len(access); i++ {
+ switch access[i] {
+ case 'r', 'w', 'm':
+ default:
+ errs = multierror.Append(errs, fmt.Errorf("access %s is invalid", r.Devices[index].Access))
+ return
+ }
+ }
+ }
+
+ return
+}
+
+// CheckSeccomp checks v.spec.Linux.Seccomp
+func (v *Validator) CheckSeccomp() (errs error) {
+ logrus.Debugf("check linux seccomp")
+
+ s := v.spec.Linux.Seccomp
+ if !seccompActionValid(s.DefaultAction) {
+ errs = multierror.Append(errs, fmt.Errorf("seccomp defaultAction %q is invalid", s.DefaultAction))
+ }
+ for index := 0; index < len(s.Syscalls); index++ {
+ if !syscallValid(s.Syscalls[index]) {
+ errs = multierror.Append(errs, fmt.Errorf("syscall %v is invalid", s.Syscalls[index]))
+ }
+ }
+ for index := 0; index < len(s.Architectures); index++ {
+ switch s.Architectures[index] {
+ case rspec.ArchX86:
+ case rspec.ArchX86_64:
+ case rspec.ArchX32:
+ case rspec.ArchARM:
+ case rspec.ArchAARCH64:
+ case rspec.ArchMIPS:
+ case rspec.ArchMIPS64:
+ case rspec.ArchMIPS64N32:
+ case rspec.ArchMIPSEL:
+ case rspec.ArchMIPSEL64:
+ case rspec.ArchMIPSEL64N32:
+ case rspec.ArchPPC:
+ case rspec.ArchPPC64:
+ case rspec.ArchPPC64LE:
+ case rspec.ArchS390:
+ case rspec.ArchS390X:
+ case rspec.ArchPARISC:
+ case rspec.ArchPARISC64:
+ default:
+ errs = multierror.Append(errs, fmt.Errorf("seccomp architecture %q is invalid", s.Architectures[index]))
+ }
+ }
+
+ return
+}
+
+// CapValid checks whether a capability is valid
+func CapValid(c string, hostSpecific bool) error {
+ isValid := false
+
+ if !strings.HasPrefix(c, "CAP_") {
+ return fmt.Errorf("capability %s must start with CAP_", c)
+ }
+ for _, cap := range capability.List() {
+ if c == fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())) {
+ if hostSpecific && cap > LastCap() {
+ return fmt.Errorf("%s is not supported on the current host", c)
+ }
+ isValid = true
+ break
+ }
+ }
+
+ if !isValid {
+ return fmt.Errorf("invalid capability: %s", c)
+ }
+ return nil
+}
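+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+//
+//	err := CapValid("CAP_SYS_ADMIN", false) // nil: known capability name
+//	err = CapValid("SYS_ADMIN", false)      // error: missing the CAP_ prefix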
+
+// LastCap returns the last capability known to the running kernel
+func LastCap() capability.Cap {
+ last := capability.CAP_LAST_CAP
+ // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+ if last == capability.Cap(63) {
+ last = capability.CAP_BLOCK_SUSPEND
+ }
+
+ return last
+}
+
+func envValid(env string) bool {
+ items := strings.Split(env, "=")
+ if len(items) < 2 {
+ return false
+ }
+ for i, ch := range strings.TrimSpace(items[0]) {
+ if !unicode.IsDigit(ch) && !unicode.IsLetter(ch) && ch != '_' {
+ return false
+ }
+ if i == 0 && unicode.IsDigit(ch) {
+			logrus.Warnf("env %v: variable names beginning with a digit are not recommended.", env)
+ }
+ }
+ return true
+}
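+
+// Illustrative inputs (editor's sketch, not part of the vendored source):
+//
+//	envValid("PATH=/usr/bin") // true
+//	envValid("LD_PRELOAD")    // false: no '=' separating name and value
+//	envValid("FOO-BAR=baz")   // false: '-' is not allowed in the name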
+
+func (v *Validator) rlimitValid(rlimit rspec.POSIXRlimit) (errs error) {
+ if rlimit.Hard < rlimit.Soft {
+ errs = multierror.Append(errs, fmt.Errorf("hard limit of rlimit %s should not be less than soft limit", rlimit.Type))
+ }
+
+ if v.platform == "linux" {
+ for _, val := range defaultRlimits {
+ if val == rlimit.Type {
+ return
+ }
+ }
+ errs = multierror.Append(errs, fmt.Errorf("rlimit type %q is invalid", rlimit.Type))
+ } else {
+ logrus.Warnf("process.rlimits validation not yet implemented for platform %q", v.platform)
+ }
+
+ return
+}
+
+func namespaceValid(ns rspec.LinuxNamespace) bool {
+ switch ns.Type {
+ case rspec.PIDNamespace:
+ case rspec.NetworkNamespace:
+ case rspec.MountNamespace:
+ case rspec.IPCNamespace:
+ case rspec.UTSNamespace:
+ case rspec.UserNamespace:
+ case rspec.CgroupNamespace:
+ default:
+ return false
+ }
+
+ if ns.Path != "" && !filepath.IsAbs(ns.Path) {
+ return false
+ }
+
+ return true
+}
+
+func pathValid(os, path string) error {
+ if os == "windows" {
+ matched, err := regexp.MatchString("^[a-zA-Z]:(\\\\[^\\\\/<>|:*?\"]+)+$", path)
+ if err != nil {
+ return err
+ }
+ if !matched {
+ return fmt.Errorf("invalid windows path %v", path)
+ }
+ return nil
+ }
+ if !filepath.IsAbs(path) {
+ return fmt.Errorf("%v is not an absolute path", path)
+ }
+ return nil
+}
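+
+// Illustrative inputs (editor's sketch, not part of the vendored source):
+//
+//	pathValid("windows", `c:\Windows\System32`) // nil: drive letter plus backslash segments
+//	pathValid("windows", `\Windows`)            // error: missing drive letter
+//	pathValid("linux", "etc/hosts")             // error: not absolute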
+
+// nestedValid reports whether pathB is nested within pathA
+func nestedValid(os, pathA, pathB string) (bool, error) {
+ if pathA == pathB {
+ return false, nil
+ }
+ if pathA == "/" && pathB != "" {
+ return true, nil
+ }
+
+ var sep string
+ if os == "windows" {
+ sep = "\\"
+ } else {
+ sep = "/"
+ }
+
+	splitPathA := strings.Split(filepath.Clean(pathA), sep)
+	splitPathB := strings.Split(filepath.Clean(pathB), sep)
+	lenA := len(splitPathA)
+	lenB := len(splitPathB)
+
+	if lenA > lenB {
+		if (lenA - lenB) == 1 {
+			// pathA may end with a separator, which leaves one trailing
+			// empty element after the split; anything else is not nested
+			if splitPathA[lenA-1] != "" {
+				return false, nil
+			}
+			splitPathA = splitPathA[:lenA-1]
+		} else {
+			return false, nil
+		}
+	}
+
+	for i, partA := range splitPathA {
+		if partA != splitPathB[i] {
+			return false, nil
+		}
+	}
+
+ return true, nil
+}
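+
+// Illustrative results (editor's sketch, not part of the vendored source):
+//
+//	nestedValid("linux", "/proc", "/proc/sys") // true, nil
+//	nestedValid("linux", "/proc", "/dev")      // false, nil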
+
+func deviceValid(d rspec.LinuxDevice) bool {
+ switch d.Type {
+ case "b", "c", "u":
+ if d.Major <= 0 || d.Minor <= 0 {
+ return false
+ }
+ case "p":
+ if d.Major > 0 || d.Minor > 0 {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
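+
+// Illustrative inputs (editor's sketch, not part of the vendored source):
+//
+//	deviceValid(rspec.LinuxDevice{Type: "c", Path: "/dev/null", Major: 1, Minor: 3}) // true
+//	deviceValid(rspec.LinuxDevice{Type: "p", Path: "/dev/fifo0", Major: 1})          // false: pipes carry no major/minor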
+
+func seccompActionValid(secc rspec.LinuxSeccompAction) bool {
+ switch secc {
+ case "":
+ case rspec.ActKill:
+ case rspec.ActTrap:
+ case rspec.ActErrno:
+ case rspec.ActTrace:
+ case rspec.ActAllow:
+ default:
+ return false
+ }
+ return true
+}
+
+func syscallValid(s rspec.LinuxSyscall) bool {
+ if !seccompActionValid(s.Action) {
+ return false
+ }
+ for index := 0; index < len(s.Args); index++ {
+ arg := s.Args[index]
+ switch arg.Op {
+ case rspec.OpNotEqual:
+ case rspec.OpLessThan:
+ case rspec.OpLessEqual:
+ case rspec.OpEqualTo:
+ case rspec.OpGreaterEqual:
+ case rspec.OpGreaterThan:
+ case rspec.OpMaskedEqual:
+ default:
+ return false
+ }
+ }
+ return true
+}
+
+func isStruct(t reflect.Type) bool {
+ return t.Kind() == reflect.Struct
+}
+
+func isStructPtr(t reflect.Type) bool {
+ return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
+}
+
+func checkMandatoryUnit(field reflect.Value, tagField reflect.StructField, parent string) (errs error) {
+ mandatory := !strings.Contains(tagField.Tag.Get("json"), "omitempty")
+ switch field.Kind() {
+ case reflect.Ptr:
+ if mandatory && field.IsNil() {
+ errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
+ }
+ case reflect.String:
+ if mandatory && (field.Len() == 0) {
+ errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
+ }
+ case reflect.Slice:
+ if mandatory && (field.IsNil() || field.Len() == 0) {
+ errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
+ return
+ }
+ for index := 0; index < field.Len(); index++ {
+ mValue := field.Index(index)
+ if mValue.CanInterface() {
+ errs = multierror.Append(errs, checkMandatory(mValue.Interface()))
+ }
+ }
+ case reflect.Map:
+ if mandatory && (field.IsNil() || field.Len() == 0) {
+ errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", parent, tagField.Name))
+ return
+ }
+ keys := field.MapKeys()
+ for index := 0; index < len(keys); index++ {
+ mValue := field.MapIndex(keys[index])
+ if mValue.CanInterface() {
+ errs = multierror.Append(errs, checkMandatory(mValue.Interface()))
+ }
+ }
+ default:
+ }
+
+ return
+}
+
+func checkMandatory(obj interface{}) (errs error) {
+ objT := reflect.TypeOf(obj)
+ objV := reflect.ValueOf(obj)
+ if isStructPtr(objT) {
+ objT = objT.Elem()
+ objV = objV.Elem()
+ } else if !isStruct(objT) {
+ return
+ }
+
+ for i := 0; i < objT.NumField(); i++ {
+ t := objT.Field(i).Type
+ if isStructPtr(t) && objV.Field(i).IsNil() {
+ if !strings.Contains(objT.Field(i).Tag.Get("json"), "omitempty") {
+ errs = multierror.Append(errs, fmt.Errorf("'%s.%s' should not be empty", objT.Name(), objT.Field(i).Name))
+ }
+ } else if (isStruct(t) || isStructPtr(t)) && objV.Field(i).CanInterface() {
+ errs = multierror.Append(errs, checkMandatory(objV.Field(i).Interface()))
+ } else {
+ errs = multierror.Append(errs, checkMandatoryUnit(objV.Field(i), objT.Field(i), objT.Name()))
+ }
+
+ }
+ return
+}
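+
+// Illustrative behavior (editor's sketch, not part of the vendored source):
+// given
+//
+//	type demo struct {
+//		Name string  `json:"name"`
+//		Note *string `json:"note,omitempty"`
+//	}
+//
+// checkMandatory(demo{}) reports 'demo.Name' as empty but accepts the nil
+// Note, because its json tag carries "omitempty".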
+
+// CheckMandatoryFields checks mandatory field of container's config file
+func (v *Validator) CheckMandatoryFields() error {
+ logrus.Debugf("check mandatory fields")
+
+ return checkMandatory(v.spec)
+}
diff --git a/vendor/github.com/opencontainers/selinux/LICENSE b/vendor/github.com/opencontainers/selinux/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/selinux/README.md b/vendor/github.com/opencontainers/selinux/README.md
new file mode 100644
index 000000000..043a92937
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/README.md
@@ -0,0 +1,7 @@
+# selinux
+
+[![GoDoc](https://godoc.org/github.com/opencontainers/selinux?status.svg)](https://godoc.org/github.com/opencontainers/selinux) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/selinux)](https://goreportcard.com/report/github.com/opencontainers/selinux) [![Build Status](https://travis-ci.org/opencontainers/selinux.svg?branch=master)](https://travis-ci.org/opencontainers/selinux)
+
+Common SELinux package used across the container ecosystem.
+
+Please see the [godoc](https://godoc.org/github.com/opencontainers/selinux) for more information.
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
new file mode 100644
index 000000000..6cfc5fded
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -0,0 +1,84 @@
+// +build !selinux !linux
+
+package label
+
+// InitLabels returns the process label and file labels to be used within
+// the container. A list of options can be passed into this function to alter
+// the labels.
+func InitLabels(options []string) (string, string, error) {
+ return "", "", nil
+}
+
+func GetROMountLabel() string {
+ return ""
+}
+
+func GenLabels(options string) (string, string, error) {
+ return "", "", nil
+}
+
+func FormatMountLabel(src string, mountLabel string) string {
+ return src
+}
+
+func SetProcessLabel(processLabel string) error {
+ return nil
+}
+
+func GetFileLabel(path string) (string, error) {
+ return "", nil
+}
+
+func SetFileLabel(path string, fileLabel string) error {
+ return nil
+}
+
+func SetFileCreateLabel(fileLabel string) error {
+ return nil
+}
+
+func Relabel(path string, fileLabel string, shared bool) error {
+ return nil
+}
+
+func GetPidLabel(pid int) (string, error) {
+ return "", nil
+}
+
+func Init() {
+}
+
+func ReserveLabel(label string) error {
+ return nil
+}
+
+func ReleaseLabel(label string) error {
+ return nil
+}
+
+// DupSecOpt takes a process label and returns security options that
+// can be used to set duplicate labels on future container processes
+func DupSecOpt(src string) []string {
+ return nil
+}
+
+// DisableSecOpt returns a security opt that can disable labeling
+// support for future container processes
+func DisableSecOpt() []string {
+ return nil
+}
+
+// Validate checks that the label does not include unexpected options
+func Validate(label string) error {
+ return nil
+}
+
+// RelabelNeeded checks whether the user requested a relabel
+func RelabelNeeded(label string) bool {
+ return false
+}
+
+// IsShared checks that the label includes a "shared" mark
+func IsShared(label string) bool {
+ return false
+}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
new file mode 100644
index 000000000..c008a387b
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -0,0 +1,206 @@
+// +build selinux,linux
+
+package label
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/selinux/go-selinux"
+)
+
+// Valid Label Options
+var validOptions = map[string]bool{
+ "disable": true,
+ "type": true,
+ "user": true,
+ "role": true,
+ "level": true,
+}
+
+var ErrIncompatibleLabel = fmt.Errorf("bad SELinux options: z and Z can not be used together")
+
+// InitLabels returns the process label and file labels to be used within
+// the container. A list of options can be passed into this function to alter
+// the labels. The labels returned will include a random MCS String, that is
+// guaranteed to be unique.
+func InitLabels(options []string) (string, string, error) {
+ if !selinux.GetEnabled() {
+ return "", "", nil
+ }
+ processLabel, mountLabel := selinux.ContainerLabels()
+ if processLabel != "" {
+ pcon := selinux.NewContext(processLabel)
+ mcon := selinux.NewContext(mountLabel)
+ for _, opt := range options {
+ if opt == "disable" {
+ return "", "", nil
+ }
+ if i := strings.Index(opt, ":"); i == -1 {
+				return "", "", fmt.Errorf("bad label option %q, valid options are 'disable' or 'user', 'role', 'level', 'type' followed by ':' and a value", opt)
+ }
+ con := strings.SplitN(opt, ":", 2)
+			if !validOptions[con[0]] {
+				return "", "", fmt.Errorf("bad label option %q, valid options are 'disable', 'user', 'role', 'level', 'type'", con[0])
+			}
+ pcon[con[0]] = con[1]
+ if con[0] == "level" || con[0] == "user" {
+ mcon[con[0]] = con[1]
+ }
+ }
+ _ = ReleaseLabel(processLabel)
+ processLabel = pcon.Get()
+ mountLabel = mcon.Get()
+ _ = ReserveLabel(processLabel)
+ }
+ return processLabel, mountLabel, nil
+}
+
+func ROMountLabel() string {
+ return selinux.ROFileLabel()
+}
+
+// DEPRECATED: The GenLabels function is only to be used during the transition to the official API.
+func GenLabels(options string) (string, string, error) {
+ return InitLabels(strings.Fields(options))
+}
+
+// FormatMountLabel returns a string to be used by the mount command.
+// The format of this string will be used to alter the labeling of the mountpoint.
+// The string returned is suitable to be used as the options field of the mount command.
+// If you need to have additional mount point options, you can pass them in as
+// the first parameter. Second parameter is the label that you wish to apply
+// to all content in the mount point.
+func FormatMountLabel(src, mountLabel string) string {
+ if mountLabel != "" {
+ switch src {
+ case "":
+ src = fmt.Sprintf("context=%q", mountLabel)
+ default:
+ src = fmt.Sprintf("%s,context=%q", src, mountLabel)
+ }
+ }
+ return src
+}
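+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+//
+//	FormatMountLabel("ro", "system_u:object_r:container_file_t:s0:c1,c2")
+//	// => `ro,context="system_u:object_r:container_file_t:s0:c1,c2"`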
+
+// SetProcessLabel takes a process label and tells the kernel to assign the
+// label to the next program executed by the current process.
+func SetProcessLabel(processLabel string) error {
+ if processLabel == "" {
+ return nil
+ }
+ return selinux.SetExecLabel(processLabel)
+}
+
+// ProcessLabel returns the process label that the kernel will assign
+// to the next program executed by the current process. If "" is returned
+// this indicates that the default labeling will happen for the process.
+func ProcessLabel() (string, error) {
+ return selinux.ExecLabel()
+}
+
+// FileLabel returns the label for the specified path
+func FileLabel(path string) (string, error) {
+ return selinux.FileLabel(path)
+}
+
+// SetFileLabel sets the label of the file at path to fileLabel
+func SetFileLabel(path string, fileLabel string) error {
+ if selinux.GetEnabled() && fileLabel != "" {
+ return selinux.SetFileLabel(path, fileLabel)
+ }
+ return nil
+}
+
+// SetFileCreateLabel tells the kernel the label for all files to be created
+func SetFileCreateLabel(fileLabel string) error {
+ if selinux.GetEnabled() {
+ return selinux.SetFSCreateLabel(fileLabel)
+ }
+ return nil
+}
+
+// Relabel changes the label of path to the filelabel string.
+// It changes the MCS label to s0 if shared is true.
+// This will allow all containers to share the content.
+func Relabel(path string, fileLabel string, shared bool) error {
+ if !selinux.GetEnabled() {
+ return nil
+ }
+
+ if fileLabel == "" {
+ return nil
+ }
+
+	excludePaths := map[string]bool{"/": true, "/usr": true, "/etc": true}
+	if excludePaths[path] {
+ return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
+ }
+
+ if shared {
+ c := selinux.NewContext(fileLabel)
+ c["level"] = "s0"
+ fileLabel = c.Get()
+ }
+ if err := selinux.Chcon(path, fileLabel, true); err != nil {
+ return err
+ }
+ return nil
+}
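+
+// Illustrative usage (editor's sketch; the path is hypothetical):
+//
+//	// Give a bind-mounted volume a shared s0 level so any container can use it.
+//	err := Relabel("/var/lib/myvolume", mountLabel, true)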
+
+// PidLabel will return the label of the process running with the specified pid
+func PidLabel(pid int) (string, error) {
+ return selinux.PidLabel(pid)
+}
+
+// Init initialises the labeling system
+func Init() {
+ selinux.GetEnabled()
+}
+
+// ReserveLabel will record the fact that the MCS label has already been used.
+// This will prevent InitLabels from using the MCS label in a newly created
+// container
+func ReserveLabel(label string) error {
+ selinux.ReserveLabel(label)
+ return nil
+}
+
+// ReleaseLabel will remove the reservation of the MCS label.
+// This will allow InitLabels to use the MCS label in a newly created
+// container
+func ReleaseLabel(label string) error {
+ selinux.ReleaseLabel(label)
+ return nil
+}
+
+// DupSecOpt takes a process label and returns security options that
+// can be used to set duplicate labels on future container processes
+func DupSecOpt(src string) []string {
+ return selinux.DupSecOpt(src)
+}
+
+// DisableSecOpt returns a security opt that can disable labeling
+// support for future container processes
+func DisableSecOpt() []string {
+ return selinux.DisableSecOpt()
+}
+
+// Validate checks that the label does not include unexpected options
+func Validate(label string) error {
+ if strings.Contains(label, "z") && strings.Contains(label, "Z") {
+ return ErrIncompatibleLabel
+ }
+ return nil
+}
+
+// RelabelNeeded checks whether the user requested a relabel
+func RelabelNeeded(label string) bool {
+ return strings.Contains(label, "z") || strings.Contains(label, "Z")
+}
+
+// IsShared checks that the label includes a "shared" mark
+func IsShared(label string) bool {
+ return strings.Contains(label, "z")
+}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
new file mode 100644
index 000000000..de9316c2e
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
@@ -0,0 +1,593 @@
+// +build linux
+
+package selinux
+
+import (
+ "bufio"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+)
+
+const (
+	// Enforcing constant indicates SELinux is in enforcing mode
+	Enforcing = 1
+	// Permissive constant indicates SELinux is in permissive mode
+	Permissive = 0
+	// Disabled constant indicates SELinux is disabled
+	Disabled = -1
+ selinuxDir = "/etc/selinux/"
+ selinuxConfig = selinuxDir + "config"
+ selinuxTypeTag = "SELINUXTYPE"
+ selinuxTag = "SELINUX"
+ selinuxPath = "/sys/fs/selinux"
+ xattrNameSelinux = "security.selinux"
+ stRdOnly = 0x01
+)
+
+type selinuxState struct {
+ enabledSet bool
+ enabled bool
+ selinuxfsSet bool
+ selinuxfs string
+ mcsList map[string]bool
+ sync.Mutex
+}
+
+var (
+ assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
+ state = selinuxState{
+ mcsList: make(map[string]bool),
+ }
+)
+
+// Context is a representation of the SELinux label broken into 4 parts
+type Context map[string]string
+
+func (s *selinuxState) setEnable(enabled bool) bool {
+ s.Lock()
+ defer s.Unlock()
+ s.enabledSet = true
+ s.enabled = enabled
+ return s.enabled
+}
+
+func (s *selinuxState) getEnabled() bool {
+ s.Lock()
+ enabled := s.enabled
+ enabledSet := s.enabledSet
+ s.Unlock()
+ if enabledSet {
+ return enabled
+ }
+
+ enabled = false
+ if fs := getSelinuxMountPoint(); fs != "" {
+ if con, _ := CurrentLabel(); con != "kernel" {
+ enabled = true
+ }
+ }
+ return s.setEnable(enabled)
+}
+
+// SetDisabled disables selinux support for the package
+func SetDisabled() {
+ state.setEnable(false)
+}
+
+func (s *selinuxState) setSELinuxfs(selinuxfs string) string {
+ s.Lock()
+ defer s.Unlock()
+ s.selinuxfsSet = true
+ s.selinuxfs = selinuxfs
+ return s.selinuxfs
+}
+
+func (s *selinuxState) getSELinuxfs() string {
+ s.Lock()
+ selinuxfs := s.selinuxfs
+ selinuxfsSet := s.selinuxfsSet
+ s.Unlock()
+ if selinuxfsSet {
+ return selinuxfs
+ }
+
+ selinuxfs = ""
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return selinuxfs
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ sepIdx := strings.Index(txt, " - ")
+ if sepIdx == -1 {
+ continue
+ }
+ if !strings.Contains(txt[sepIdx:], "selinuxfs") {
+ continue
+ }
+ fields := strings.Split(txt, " ")
+ if len(fields) < 5 {
+ continue
+ }
+ selinuxfs = fields[4]
+ break
+ }
+
+ if selinuxfs != "" {
+ var buf syscall.Statfs_t
+ syscall.Statfs(selinuxfs, &buf)
+ if (buf.Flags & stRdOnly) == 1 {
+ selinuxfs = ""
+ }
+ }
+ return s.setSELinuxfs(selinuxfs)
+}
+
+// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
+// filesystem or an empty string if no mountpoint is found. Selinuxfs is
+// a proc-like pseudo-filesystem that exposes the selinux policy API to
+// processes. The existence of an selinuxfs mount is used to determine
+// whether selinux is currently enabled or not.
+func getSelinuxMountPoint() string {
+ return state.getSELinuxfs()
+}
+
+// GetEnabled returns whether selinux is currently enabled.
+func GetEnabled() bool {
+ return state.getEnabled()
+}
+
+func readConfig(target string) (value string) {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ in, err := os.Open(selinuxConfig)
+ if err != nil {
+ return ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return ""
+ }
+ done = true
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == target {
+ return strings.Trim(val, "\"")
+ }
+ }
+ }
+ return ""
+}
+
+func getSELinuxPolicyRoot() string {
+ return selinuxDir + readConfig(selinuxTypeTag)
+}
+
+func readCon(name string) (string, error) {
+ var val string
+
+ in, err := os.Open(name)
+ if err != nil {
+ return "", err
+ }
+ defer in.Close()
+
+ _, err = fmt.Fscanf(in, "%s", &val)
+ return val, err
+}
+
+// SetFileLabel sets the SELinux label for this path or returns an error.
+func SetFileLabel(path string, label string) error {
+ return lsetxattr(path, xattrNameSelinux, []byte(label), 0)
+}
+
+// FileLabel returns the SELinux label for this path or returns an error.
+func FileLabel(path string) (string, error) {
+ label, err := lgetxattr(path, xattrNameSelinux)
+ if err != nil {
+ return "", err
+ }
+ // Trim the NUL byte at the end of the byte buffer, if present.
+ if len(label) > 0 && label[len(label)-1] == '\x00' {
+ label = label[:len(label)-1]
+ }
+ return string(label), nil
+}
+
+/*
+SetFSCreateLabel tells the kernel the label to use for all file system
+objects created by this task. Pass label="" to return to the default.
+*/
+func SetFSCreateLabel(label string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), label)
+}
+
+/*
+FSCreateLabel returns the label the kernel is using for file system objects
+created by this task; "" indicates the default.
+*/
+func FSCreateLabel() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
+}
+
+// CurrentLabel returns the SELinux label of the current process thread, or an error.
+func CurrentLabel() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
+}
+
+// PidLabel returns the SELinux label of the given pid, or an error.
+func PidLabel(pid int) (string, error) {
+ return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
+}
+
+/*
+ExecLabel returns the SELinux label that the kernel will use for any programs
+that are executed by the current process thread, or an error.
+*/
+func ExecLabel() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
+}
+
+func writeCon(name string, val string) error {
+ out, err := os.OpenFile(name, os.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ if val != "" {
+ _, err = out.Write([]byte(val))
+ } else {
+ _, err = out.Write(nil)
+ }
+ return err
+}
+
+/*
+SetExecLabel sets the SELinux label that the kernel will use for any programs
+that are executed by the current process thread.
+*/
+func SetExecLabel(label string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), label)
+}
+
+// Get returns the Context as a string
+func (c Context) Get() string {
+ return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
+}
+
+// NewContext creates a new Context struct from the specified label
+func NewContext(label string) Context {
+ c := make(Context)
+
+ if len(label) != 0 {
+ con := strings.SplitN(label, ":", 4)
+ c["user"] = con[0]
+ c["role"] = con[1]
+ c["type"] = con[2]
+ c["level"] = con[3]
+ }
+ return c
+}
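+
+// Illustrative round trip (editor's sketch, not part of the vendored source):
+//
+//	c := NewContext("system_u:object_r:container_file_t:s0:c1,c2")
+//	c["level"] = "s0"
+//	c.Get() // "system_u:object_r:container_file_t:s0"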
+
+// ReserveLabel reserves the MLS/MCS level component of the specified label
+func ReserveLabel(label string) {
+ if len(label) != 0 {
+ con := strings.SplitN(label, ":", 4)
+ mcsAdd(con[3])
+ }
+}
+
+func selinuxEnforcePath() string {
+ return fmt.Sprintf("%s/enforce", selinuxPath)
+}
+
+// EnforceMode returns the current SELinux mode: Enforcing, Permissive, or Disabled
+func EnforceMode() int {
+ var enforce int
+
+ enforceS, err := readCon(selinuxEnforcePath())
+ if err != nil {
+ return -1
+ }
+
+ enforce, err = strconv.Atoi(string(enforceS))
+ if err != nil {
+ return -1
+ }
+ return enforce
+}
+
+/*
+SetEnforceMode sets the current SELinux mode to Enforcing or Permissive.
+Disabled is not valid here, since that can only be set at boot time.
+*/
+func SetEnforceMode(mode int) error {
+ return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode))
+}
+
+/*
+DefaultEnforceMode returns the system's default SELinux mode: Enforcing,
+Permissive or Disabled. Note this is just the default at boot time;
+EnforceMode tells you the system's current mode.
+*/
+func DefaultEnforceMode() int {
+ switch readConfig(selinuxTag) {
+ case "enforcing":
+ return Enforcing
+ case "permissive":
+ return Permissive
+ }
+ return Disabled
+}
+
+func mcsAdd(mcs string) error {
+ state.Lock()
+ defer state.Unlock()
+ if state.mcsList[mcs] {
+ return fmt.Errorf("MCS Label already exists")
+ }
+ state.mcsList[mcs] = true
+ return nil
+}
+
+func mcsDelete(mcs string) {
+ state.Lock()
+ defer state.Unlock()
+ state.mcsList[mcs] = false
+}
+
+func intToMcs(id int, catRange uint32) string {
+ var (
+ SETSIZE = int(catRange)
+ TIER = SETSIZE
+ ORD = id
+ )
+
+ if id < 1 || id > 523776 {
+ return ""
+ }
+
+ for ORD > TIER {
+ ORD = ORD - TIER
+ TIER--
+ }
+ TIER = SETSIZE - TIER
+ ORD = ORD + TIER
+ return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
+}
+
+func uniqMcs(catRange uint32) string {
+ var (
+ n uint32
+ c1, c2 uint32
+ mcs string
+ )
+
+ for {
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c1 = n % catRange
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c2 = n % catRange
+ if c1 == c2 {
+ continue
+ } else {
+ if c1 > c2 {
+ c1, c2 = c2, c1
+ }
+ }
+ mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
+ if err := mcsAdd(mcs); err != nil {
+ continue
+ }
+ break
+ }
+ return mcs
+}
+
+/*
+ReleaseLabel will unreserve the MLS/MCS Level field of the specified label,
+allowing it to be used by another process.
+*/
+func ReleaseLabel(label string) {
+ if len(label) != 0 {
+ con := strings.SplitN(label, ":", 4)
+ mcsDelete(con[3])
+ }
+}
+
+var roFileLabel string
+
+// ROFileLabel returns the SELinux read-only file label
+func ROFileLabel() (fileLabel string) {
+ return roFileLabel
+}
+
+/*
+ContainerLabels returns an allocated processLabel and fileLabel to be used for
+container labeling by the calling process.
+*/
+func ContainerLabels() (processLabel string, fileLabel string) {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ if !GetEnabled() {
+ return "", ""
+ }
+ lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
+ in, err := os.Open(lxcPath)
+ if err != nil {
+ return "", ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err == io.EOF {
+ done = true
+ } else {
+ goto exit
+ }
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == "process" {
+ processLabel = strings.Trim(val, "\"")
+ }
+ if key == "file" {
+ fileLabel = strings.Trim(val, "\"")
+ }
+ if key == "ro_file" {
+ roFileLabel = strings.Trim(val, "\"")
+ }
+ }
+ }
+
+ if processLabel == "" || fileLabel == "" {
+ return "", ""
+ }
+
+ if roFileLabel == "" {
+ roFileLabel = fileLabel
+ }
+exit:
+ mcs := uniqMcs(1024)
+ scon := NewContext(processLabel)
+ scon["level"] = mcs
+ processLabel = scon.Get()
+ scon = NewContext(fileLabel)
+ scon["level"] = mcs
+ fileLabel = scon.Get()
+ return processLabel, fileLabel
+}
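+
+// For reference, lxc_contexts entries have this shape (values illustrative):
+//
+//	process = "system_u:system_r:container_t:s0"
+//	file = "system_u:object_r:container_file_t:s0"
+//	ro_file = "system_u:object_r:container_ro_file_t:s0"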
+
+// SecurityCheckContext validates that the SELinux label is understood by the kernel
+func SecurityCheckContext(val string) error {
+ return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
+}
+
+/*
+CopyLevel returns the dest label with its MLS/MCS level replaced by the
+level from the src label.
+*/
+func CopyLevel(src, dest string) (string, error) {
+ if src == "" {
+ return "", nil
+ }
+ if err := SecurityCheckContext(src); err != nil {
+ return "", err
+ }
+ if err := SecurityCheckContext(dest); err != nil {
+ return "", err
+ }
+ scon := NewContext(src)
+ tcon := NewContext(dest)
+ mcsDelete(tcon["level"])
+ mcsAdd(scon["level"])
+ tcon["level"] = scon["level"]
+ return tcon.Get(), nil
+}
+
+// Prevent users from relabeling system files
+func badPrefix(fpath string) error {
+ var badprefixes = []string{"/usr"}
+
+ for _, prefix := range badprefixes {
+ if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
+ return fmt.Errorf("relabeling content in %s is not allowed", prefix)
+ }
+ }
+ return nil
+}
+
+// Chcon sets the SELinux label of the file object at fpath to label.
+// If fpath is a directory and recurse is true, Chcon walks the directory
+// tree, setting the label on every file it finds
+func Chcon(fpath string, label string, recurse bool) error {
+ if label == "" {
+ return nil
+ }
+ if err := badPrefix(fpath); err != nil {
+ return err
+ }
+ callback := func(p string, info os.FileInfo, err error) error {
+ return SetFileLabel(p, label)
+ }
+
+ if recurse {
+ return filepath.Walk(fpath, callback)
+ }
+
+ return SetFileLabel(fpath, label)
+}
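+
+// For instance (illustrative; the path and label are hypothetical):
+//
+//	// Recursively relabel a volume directory for container use.
+//	err := Chcon("/var/lib/mydata", "system_u:object_r:svirt_sandbox_file_t:s0", true)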
+
+// DupSecOpt takes an SELinux process label and returns security options that
+// will set the SELinux Type and Level for future container processes
+func DupSecOpt(src string) []string {
+ if src == "" {
+ return nil
+ }
+ con := NewContext(src)
+ if con["user"] == "" ||
+ con["role"] == "" ||
+ con["type"] == "" ||
+ con["level"] == "" {
+ return nil
+ }
+ return []string{"user:" + con["user"],
+ "role:" + con["role"],
+ "type:" + con["type"],
+ "level:" + con["level"]}
+}
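+
+// As an illustration (not in the upstream file): for a process label such as
+// "system_u:system_r:svirt_lxc_net_t:s0:c1,c2", DupSecOpt returns
+//
+//	[]string{"user:system_u", "role:system_r", "type:svirt_lxc_net_t", "level:s0:c1,c2"}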
+
+// DisableSecOpt returns a security opt that can be used to disable SELinux
+// labeling support for future container processes
+func DisableSecOpt() []string {
+ return []string{"disable"}
+}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go
new file mode 100644
index 000000000..7f2ef8504
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go
@@ -0,0 +1,78 @@
+// +build linux
+
+package selinux
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _zero uintptr
+
+// lgetxattr returns the value of the extended attribute attr on the file at
+// path (without following symlinks), or an error if it cannot be read
+func lgetxattr(path string, attr string) ([]byte, error) {
+ var sz int
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start with a 128-byte buffer
+ sz = 128
+ dest := make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+
+ switch {
+ case errno == syscall.ENODATA:
+ return nil, errno
+ case errno == syscall.ENOTSUP:
+ return nil, errno
+ case errno == syscall.ERANGE:
+ // The 128-byte buffer was too small. Retry with a nil destination
+ // buffer and zero size, which makes the syscall report the real
+ // size of the attribute value on disk.
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
+ sz = int(_sz)
+ if sz < 0 {
+ return nil, errno
+ }
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno != 0 {
+ return nil, errno
+ }
+ case errno != 0:
+ return nil, errno
+ }
+ sz = int(_sz)
+ return dest[:sz], nil
+}
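+
+// Usage sketch (editor's example; the path is hypothetical, while
+// "security.selinux" is the attribute this package cares about):
+//
+//	raw, err := lgetxattr("/some/path", "security.selinux")
+//	// raw holds the file's SELinux context bytes on success.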
+
+func lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
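+
+// And the matching setter, sketched under the same assumptions:
+//
+//	err := lsetxattr("/some/path", "security.selinux",
+//		[]byte("system_u:object_r:etc_t:s0"), 0)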
diff --git a/vendor/github.com/ostreedev/ostree-go/LICENSE b/vendor/github.com/ostreedev/ostree-go/LICENSE
new file mode 100644
index 000000000..aa93b4dab
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/LICENSE
@@ -0,0 +1,17 @@
+Portions of this code are derived from:
+
+https://github.com/dradtke/gotk3
+
+Copyright (c) 2013 Conformal Systems LLC.
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/ostreedev/ostree-go/README.md b/vendor/github.com/ostreedev/ostree-go/README.md
new file mode 100644
index 000000000..c79010a0f
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/README.md
@@ -0,0 +1,4 @@
+OSTree-Go
+=========
+
+Go bindings for OSTree. Find out more about OSTree [here](https://github.com/ostreedev/ostree)
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go
new file mode 100644
index 000000000..a4ad0f000
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+import (
+ "unsafe"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+
+/*
+ * GBoolean
+ */
+
+// GBoolean is a Go representation of glib's gboolean
+type GBoolean C.gboolean
+
+func NewGBoolean() GBoolean {
+ return GBoolean(0)
+}
+
+func GBool(b bool) GBoolean {
+ if b {
+ return GBoolean(1)
+ }
+ return GBoolean(0)
+}
+
+func (b GBoolean) Ptr() unsafe.Pointer {
+ return unsafe.Pointer(&b)
+}
+
+func GoBool(b GBoolean) bool {
+ if b != 0 {
+ return true
+ }
+ return false
+}
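+
+// The two helpers round-trip between Go and glib booleans, e.g.:
+//
+//	gb := GBool(true) // GBoolean(1)
+//	b := GoBool(gb)   // true again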
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go
new file mode 100644
index 000000000..537db4720
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+
+import (
+ "unsafe"
+)
+
+// GIO types
+
+type GCancellable struct {
+ *GObject
+}
+
+func (self *GCancellable) native() *C.GCancellable {
+ return (*C.GCancellable)(unsafe.Pointer(self))
+}
+
+func (self *GCancellable) Ptr() unsafe.Pointer {
+ return unsafe.Pointer(self)
+}
+
+// At the moment there is no cancellable API; callers just pass nil
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go
new file mode 100644
index 000000000..714b15d0b
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+import (
+ "errors"
+ "unsafe"
+)
+
+/*
+ * GError
+ */
+
+// GError is a representation of GLib's GError
+type GError struct {
+ ptr unsafe.Pointer
+}
+
+func NewGError() GError {
+ return GError{nil}
+}
+
+func (e GError) Ptr() unsafe.Pointer {
+ if e.ptr == nil {
+ return nil
+ }
+ return e.ptr
+}
+
+// Nil clears the stored pointer. A pointer receiver is required here;
+// with a value receiver the assignment would only modify a copy.
+func (e *GError) Nil() {
+ e.ptr = nil
+}
+
+func (e *GError) native() *C.GError {
+ if e == nil || e.ptr == nil {
+ return nil
+ }
+ return (*C.GError)(e.ptr)
+}
+
+func ToGError(ptr unsafe.Pointer) GError {
+ return GError{ptr}
+}
+
+func ConvertGError(e GError) error {
+ defer C.g_error_free(e.native())
+ return errors.New(C.GoString((*C.char)(C._g_error_get_message(e.native()))))
+}
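+
+// Typical call pattern (an illustration of how the ostree bindings in this
+// tree use it; cerr stands for a *C.GError filled in by a failed C call):
+//
+//	goErr := ConvertGError(ToGError(unsafe.Pointer(cerr)))
+//
+// Note that ConvertGError frees the underlying GError, so cerr must not be
+// used afterwards.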
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go
new file mode 100644
index 000000000..babe70509
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+import (
+ "unsafe"
+)
+
+/*
+ * GFile
+ */
+
+type GFile struct {
+ ptr unsafe.Pointer
+}
+
+func (f GFile) Ptr() unsafe.Pointer {
+ return f.ptr
+}
+
+func NewGFile() *GFile {
+ return &GFile{nil}
+}
+
+func ToGFile(ptr unsafe.Pointer) *GFile {
+ gf := NewGFile()
+ gf.ptr = ptr
+ return gf
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go
new file mode 100644
index 000000000..9c155834a
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+import (
+ "unsafe"
+)
+
+/*
+ * GFileInfo
+ */
+
+type GFileInfo struct {
+ ptr unsafe.Pointer
+}
+
+func (fi GFileInfo) Ptr() unsafe.Pointer {
+ return fi.ptr
+}
+
+func NewGFileInfo() GFileInfo {
+ return GFileInfo{nil}
+}
+
+func ToGFileInfo(p unsafe.Pointer) *GFileInfo {
+ return &GFileInfo{p}
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go
new file mode 100644
index 000000000..20cc321cb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+import (
+ "unsafe"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+
+/*
+ * GHashTable
+ */
+type GHashTable struct {
+ ptr unsafe.Pointer
+}
+
+func (ht *GHashTable) Ptr() unsafe.Pointer {
+ return ht.ptr
+}
+
+func (ht *GHashTable) native() *C.GHashTable {
+ return (*C.GHashTable)(ht.ptr)
+}
+
+func ToGHashTable(ptr unsafe.Pointer) *GHashTable {
+ return &GHashTable{ptr}
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go
new file mode 100644
index 000000000..1657edf5f
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+import (
+ "unsafe"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+
+/*
+ * GHashTableIter
+ */
+type GHashTableIter struct {
+ ptr unsafe.Pointer
+}
+
+func (ht *GHashTableIter) Ptr() unsafe.Pointer {
+ return ht.ptr
+}
+
+func (ht *GHashTableIter) native() *C.GHashTableIter {
+ return (*C.GHashTableIter)(ht.ptr)
+}
+
+func ToGHashTableIter(ptr unsafe.Pointer) *GHashTableIter {
+ return &GHashTableIter{ptr}
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go
new file mode 100644
index 000000000..f3d3aa526
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h
new file mode 100644
index 000000000..a55bd242f
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h
@@ -0,0 +1,17 @@
+#include <glib.h>
+
+static char *
+_g_error_get_message (GError *error)
+{
+ g_assert (error != NULL);
+ return error->message;
+}
+
+static const char *
+_g_variant_lookup_string (GVariant *v, const char *key)
+{
+ const char *r;
+ if (g_variant_lookup (v, key, "&s", &r))
+ return r;
+ return NULL;
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go
new file mode 100644
index 000000000..dedbe749a
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+import (
+ "unsafe"
+)
+
+/*
+ * GObject
+ */
+
+// IObject is an interface type implemented by Object and all types which embed
+// an Object. It is meant to be used as a type for function arguments which
+// require GObjects or any subclasses thereof.
+type IObject interface {
+ toGObject() *C.GObject
+ ToObject() *GObject
+}
+
+// GObject is a representation of GLib's GObject.
+type GObject struct {
+ ptr unsafe.Pointer
+}
+
+func (v *GObject) Ptr() unsafe.Pointer {
+ return v.ptr
+}
+
+func (v *GObject) native() *C.GObject {
+ if v == nil {
+ return nil
+ }
+ return (*C.GObject)(v.ptr)
+}
+
+func (v *GObject) Ref() {
+ C.g_object_ref(C.gpointer(v.Ptr()))
+}
+
+func (v *GObject) Unref() {
+ C.g_object_unref(C.gpointer(v.Ptr()))
+}
+
+func (v *GObject) RefSink() {
+ C.g_object_ref_sink(C.gpointer(v.native()))
+}
+
+func (v *GObject) IsFloating() bool {
+ c := C.g_object_is_floating(C.gpointer(v.native()))
+ return GoBool(GBoolean(c))
+}
+
+func (v *GObject) ForceFloating() {
+ C.g_object_force_floating(v.native())
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go
new file mode 100644
index 000000000..05fd54a1a
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+import (
+ "unsafe"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+
+/*
+ * GOptionContext
+ */
+
+type GOptionContext struct {
+ ptr unsafe.Pointer
+}
+
+func (oc *GOptionContext) Ptr() unsafe.Pointer {
+ return oc.ptr
+}
+
+func (oc *GOptionContext) native() *C.GOptionContext {
+ return (*C.GOptionContext)(oc.ptr)
+}
+
+func ToGOptionContext(ptr unsafe.Pointer) GOptionContext {
+ return GOptionContext{ptr}
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
new file mode 100644
index 000000000..30572ea87
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013 Conformal Systems <info@conformal.com>
+ *
+ * This file originated from: http://opensource.conformal.com/
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package glibobject
+
+// #cgo pkg-config: glib-2.0 gobject-2.0
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include "glibobject.go.h"
+// #include <stdlib.h>
+import "C"
+import (
+ "fmt"
+ "unsafe"
+)
+
+/*
+ * GVariant
+ */
+
+type GVariant struct {
+ ptr unsafe.Pointer
+}
+
+//func GVariantNew(p unsafe.Pointer) *GVariant {
+//o := &GVariant{p}
+//runtime.SetFinalizer(o, (*GVariant).Unref)
+//return o;
+//}
+
+//func GVariantNewSink(p unsafe.Pointer) *GVariant {
+//o := &GVariant{p}
+//runtime.SetFinalizer(o, (*GVariant).Unref)
+//o.RefSink()
+//return o;
+//}
+
+func (v *GVariant) native() *C.GVariant {
+ return (*C.GVariant)(v.ptr)
+}
+
+func (v *GVariant) Ptr() unsafe.Pointer {
+ return v.ptr
+}
+
+func (v *GVariant) Ref() {
+ C.g_variant_ref(v.native())
+}
+
+func (v *GVariant) Unref() {
+ C.g_variant_unref(v.native())
+}
+
+func (v *GVariant) RefSink() {
+ C.g_variant_ref_sink(v.native())
+}
+
+func (v *GVariant) TypeString() string {
+ cs := (*C.char)(C.g_variant_get_type_string(v.native()))
+ return C.GoString(cs)
+}
+
+func (v *GVariant) GetChildValue(i int) *GVariant {
+ cchild := C.g_variant_get_child_value(v.native(), C.gsize(i))
+ return (*GVariant)(unsafe.Pointer(cchild))
+}
+
+func (v *GVariant) LookupString(key string) (string, error) {
+ ckey := C.CString(key)
+ defer C.free(unsafe.Pointer(ckey))
+ // TODO: Find a way to have constant C strings in golang
+ cstr := C._g_variant_lookup_string(v.native(), ckey)
+ if cstr == nil {
+ return "", fmt.Errorf("No such key: %s", key)
+ }
+ return C.GoString(cstr), nil
+}
+
+func ToGVariant(ptr unsafe.Pointer) *GVariant {
+ return &GVariant{ptr}
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
new file mode 100644
index 000000000..d3a8ae5fd
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
@@ -0,0 +1,93 @@
+// Package otbuiltin contains all of the basic commands for creating and
+// interacting with an ostree repository
+package otbuiltin
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+)
+
+// #cgo pkg-config: ostree-1
+// #include <stdlib.h>
+// #include <glib.h>
+// #include <ostree.h>
+// #include "builtin.go.h"
+import "C"
+
+type Repo struct {
+ //*glib.GObject
+ ptr unsafe.Pointer
+}
+
+// Converts an ostree repo struct to its C equivalent
+func (r *Repo) native() *C.OstreeRepo {
+ //return (*C.OstreeRepo)(r.Ptr())
+ return (*C.OstreeRepo)(r.ptr)
+}
+
+// Takes a C ostree repo and converts it to a Go struct
+func repoFromNative(p *C.OstreeRepo) *Repo {
+ if p == nil {
+ return nil
+ }
+ //o := (*glib.GObject)(unsafe.Pointer(p))
+ //r := &Repo{o}
+ r := &Repo{unsafe.Pointer(p)}
+ return r
+}
+
+// Checks if the repo has been initialized
+func (r *Repo) isInitialized() bool {
+ if r.ptr != nil {
+ return true
+ }
+ return false
+}
+
+// Attempts to open the repo at the given path
+func OpenRepo(path string) (*Repo, error) {
+ var cerr *C.GError = nil
+ cpath := C.CString(path)
+ pathc := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(pathc))
+ crepo := C.ostree_repo_new(pathc)
+ repo := repoFromNative(crepo)
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr)))
+ if !r {
+ return nil, generateError(cerr)
+ }
+ return repo, nil
+}
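+
+// Usage sketch (the repository path is hypothetical):
+//
+//	repo, err := OpenRepo("/ostree/repo")
+//	if err != nil {
+//		// handle the error
+//	}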
+
+// Enable support for tombstone commits, which allow the repo to distinguish between
+// commits that were intentionally deleted and commits that were removed accidentally
+func enableTombstoneCommits(repo *Repo) error {
+ var tombstoneCommits bool
+ var config *C.GKeyFile = C.ostree_repo_get_config(repo.native())
+ var cerr *C.GError
+
+ tombstoneCommits = glib.GoBool(glib.GBoolean(C.g_key_file_get_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), nil)))
+
+ // tombstoneCommits is false either when the key is unset or when it is explicitly set to false in the config file
+ if !tombstoneCommits {
+ C.g_key_file_set_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), C.TRUE)
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(repo.native(), config, &cerr))) {
+ return generateError(cerr)
+ }
+ }
+ return nil
+}
+
+func generateError(err *C.GError) error {
+ goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err)))
+ _, file, line, ok := runtime.Caller(1)
+ if ok {
+ return fmt.Errorf("%s:%d - %s", file, line, goErr)
+ }
+ return goErr
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
new file mode 100644
index 000000000..734de9821
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
@@ -0,0 +1,191 @@
+#ifndef BUILTIN_GO_H
+#define BUILTIN_GO_H
+
+#include <glib.h>
+#include <ostree.h>
+#include <string.h>
+#include <fcntl.h>
+
+static guint32 owner_uid;
+static guint32 owner_gid;
+
+static void
+_ostree_repo_append_modifier_flags(OstreeRepoCommitModifierFlags *flags, int flag) {
+ *flags |= flag;
+}
+
+struct CommitFilterData {
+ GHashTable *mode_adds;
+ GHashTable *skip_list;
+};
+
+typedef struct CommitFilterData CommitFilterData;
+
+static char* _gptr_to_str(gpointer p)
+{
+ return (char*)p;
+}
+
+// The following functions are wrapper functions for macros since CGO can't parse macros
+static OstreeRepoFile*
+_ostree_repo_file(GFile *file)
+{
+ return OSTREE_REPO_FILE (file);
+}
+
+static guint
+_gpointer_to_uint (gpointer ptr)
+{
+ return GPOINTER_TO_UINT (ptr);
+}
+
+static gpointer
+_guint_to_pointer (guint u)
+{
+ return GUINT_TO_POINTER (u);
+}
+
+static void
+_g_clear_object (volatile GObject **object_ptr)
+{
+ g_clear_object(object_ptr);
+}
+
+static const GVariantType*
+_g_variant_type (char *type)
+{
+ return G_VARIANT_TYPE (type);
+}
+
+static int
+_at_fdcwd ()
+{
+ return AT_FDCWD;
+}
+
+static guint64
+_guint64_from_be (guint64 val)
+{
+ return GUINT64_FROM_BE (val);
+}
+
+
+
+// These functions are wrappers for variadic functions since CGO can't parse variadic functions
+static void
+_g_printerr_onearg (char* msg,
+ char* arg)
+{
+ g_printerr("%s %s\n", msg, arg);
+}
+
+static void
+_g_set_error_onearg (GError *err,
+ char* msg,
+ char* arg)
+{
+ g_set_error(&err, G_IO_ERROR, G_IO_ERROR_FAILED, "%s %s", msg, arg);
+}
+
+static void
+_g_variant_builder_add_twoargs (GVariantBuilder* builder,
+ const char *format_string,
+ char *arg1,
+ GVariant *arg2)
+{
+ g_variant_builder_add(builder, format_string, arg1, arg2);
+}
+
+static GHashTable*
+_g_hash_table_new_full ()
+{
+ return g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
+}
+
+static void
+_g_variant_get_commit_dump (GVariant *variant,
+ const char *format,
+ char **subject,
+ char **body,
+ guint64 *timestamp)
+{
+ return g_variant_get (variant, format, NULL, NULL, NULL, subject, body, timestamp, NULL, NULL);
+}
+
+static guint32
+_binary_or (guint32 a, guint32 b)
+{
+ return a | b;
+}
+
+static void
+_cleanup (OstreeRepo *self,
+ OstreeRepoCommitModifier *modifier,
+ GCancellable *cancellable,
+ GError **out_error)
+{
+ if (self)
+ ostree_repo_abort_transaction(self, cancellable, out_error);
+ if (modifier)
+ ostree_repo_commit_modifier_unref (modifier);
+}
+
+// The following functions make up a commit_filter function that gets passed into
+// another C function (and thus can't be a go function) as well as its helpers
+static OstreeRepoCommitFilterResult
+_commit_filter (OstreeRepo *self,
+ const char *path,
+ GFileInfo *file_info,
+ gpointer user_data)
+{
+ struct CommitFilterData *data = user_data;
+ GHashTable *mode_adds = data->mode_adds;
+ GHashTable *skip_list = data->skip_list;
+ gpointer value;
+
+ if (owner_uid >= 0)
+ g_file_info_set_attribute_uint32 (file_info, "unix::uid", owner_uid);
+ if (owner_gid >= 0)
+ g_file_info_set_attribute_uint32 (file_info, "unix::gid", owner_gid);
+
+ if (mode_adds && g_hash_table_lookup_extended (mode_adds, path, NULL, &value))
+ {
+ guint current_mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode");
+ guint mode_add = GPOINTER_TO_UINT (value);
+ g_file_info_set_attribute_uint32 (file_info, "unix::mode",
+ current_mode | mode_add);
+ g_hash_table_remove (mode_adds, path);
+ }
+
+ if (skip_list && g_hash_table_contains (skip_list, path))
+ {
+ g_hash_table_remove (skip_list, path);
+ return OSTREE_REPO_COMMIT_FILTER_SKIP;
+ }
+
+ return OSTREE_REPO_COMMIT_FILTER_ALLOW;
+}
+
+
+static void
+_set_owner_uid (guint32 uid)
+{
+ owner_uid = uid;
+}
+
+static void _set_owner_gid (guint32 gid)
+{
+ owner_gid = gid;
+}
+
+// Wrapper for a function that takes a C function pointer as a parameter,
+// since that translation doesn't work in Go
+static OstreeRepoCommitModifier*
+_ostree_repo_commit_modifier_new_wrapper (OstreeRepoCommitModifierFlags flags,
+ gpointer user_data,
+ GDestroyNotify destroy_notify)
+{
+ return ostree_repo_commit_modifier_new(flags, _commit_filter, user_data, destroy_notify);
+}
+
+#endif
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
new file mode 100644
index 000000000..55b51bfbd
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
@@ -0,0 +1,102 @@
+package otbuiltin
+
+import (
+ "strings"
+ "unsafe"
+
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+)
+
+// #cgo pkg-config: ostree-1
+// #include <stdlib.h>
+// #include <glib.h>
+// #include <ostree.h>
+// #include "builtin.go.h"
+import "C"
+
+// Global variable for options
+var checkoutOpts checkoutOptions
+
+// Contains all of the options for checking commits out of
+// an ostree repo
+type checkoutOptions struct {
+ UserMode bool // Do not change file ownership or initialize extended attributes
+ Union bool // Keep existing directories and unchanged files, overwriting existing filesystem
+ AllowNoent bool // Do nothing if the specified filepath does not exist
+ DisableCache bool // Do not update or use the internal repository uncompressed object cache
+ Whiteouts bool // Process 'whiteout' (docker style) entries
+ RequireHardlinks bool // Do not fall back to full copies if hard linking fails
+ Subpath string // Checkout sub-directory path
+ FromFile string // Process many checkouts from the given file
+}
+
+// Instantiates and returns a checkoutOptions struct with default values set
+func NewCheckoutOptions() checkoutOptions {
+ return checkoutOptions{}
+}
+
+// Checkout checks out the given commit (or ref) from the repository at repoPath to the destination. Returns an error if the checkout could not be processed
+func Checkout(repoPath, destination, commit string, opts checkoutOptions) error {
+ checkoutOpts = opts
+
+ var cancellable *glib.GCancellable
+ ccommit := C.CString(commit)
+ defer C.free(unsafe.Pointer(ccommit))
+ var gerr = glib.NewGError()
+ cerr := (*C.GError)(gerr.Ptr())
+ defer C.free(unsafe.Pointer(cerr))
+
+ repoPathc := C.g_file_new_for_path(C.CString(repoPath))
+ defer C.g_object_unref(C.gpointer(repoPathc))
+ crepo := C.ostree_repo_new(repoPathc)
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+ return generateError(cerr)
+ }
+
+ if strings.Compare(checkoutOpts.FromFile, "") != 0 {
+ err := processManyCheckouts(crepo, destination, cancellable)
+ if err != nil {
+ return err
+ }
+ } else {
+ var resolvedCommit *C.char
+ defer C.free(unsafe.Pointer(resolvedCommit))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
+ return generateError(cerr)
+ }
+ err := processOneCheckout(crepo, resolvedCommit, checkoutOpts.Subpath, destination, cancellable)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
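+
+// For example (illustrative; the paths and ref are hypothetical):
+//
+//	opts := NewCheckoutOptions()
+//	opts.UserMode = true
+//	err := Checkout("/ostree/repo", "/tmp/rootfs", "master", opts)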
+
+// Processes one checkout from the repo
+func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, subpath, destination string, cancellable *glib.GCancellable) error {
+ cdest := C.CString(destination)
+ defer C.free(unsafe.Pointer(cdest))
+ var gerr = glib.NewGError()
+ cerr := (*C.GError)(gerr.Ptr())
+ defer C.free(unsafe.Pointer(cerr))
+ var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
+
+ if checkoutOpts.UserMode {
+ repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER
+ }
+ if checkoutOpts.Union {
+ repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES
+ }
+
+ checkedOut := glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr)))
+ if !checkedOut {
+ return generateError(cerr)
+ }
+
+ return nil
+}
+
+// processManyCheckouts processes checkouts listed in the FromFile; currently a stub that does nothing
+func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error {
+ return nil
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
new file mode 100644
index 000000000..9550f802c
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
@@ -0,0 +1,482 @@
+package otbuiltin
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+ "unsafe"
+
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+)
+
+// #cgo pkg-config: ostree-1
+// #include <stdlib.h>
+// #include <glib.h>
+// #include <ostree.h>
+// #include "builtin.go.h"
+import "C"
+
+// Declare global variable to store commitOptions
+var options commitOptions
+
+// Declare a function prototype for being passed into another function
+type handleLineFunc func(string, *glib.GHashTable) error
+
+// Contains all of the options for committing to an ostree repo. Initialize
+// with NewCommitOptions()
+type commitOptions struct {
+ Subject string // One line subject
+ Body string // Full description
+ Parent string // Parent of the commit
+ Tree []string // 'dir=PATH' or 'tar=TARFILE' or 'ref=COMMIT': overlay the given argument as a tree
+ AddMetadataString []string // Add a key/value pair to metadata
+ AddDetachedMetadataString []string // Add a key/value pair to detached metadata
+ OwnerUID int // Set file ownership to user id
+ OwnerGID int // Set file ownership to group id
+ NoXattrs bool // Do not import extended attributes
+ LinkCheckoutSpeedup bool // Optimize for commits of trees composed of hardlinks in the repository
+ TarAutoCreateParents bool // When loading tar archives, automatically create parent directories as needed
+ SkipIfUnchanged bool // If the contents are unchanged from a previous commit, do nothing
+ StatOverrideFile string // File containing a list of permission modifications to apply
+ SkipListFile string // File containing list of file paths to skip
+ GenerateSizes bool // Generate size information along with commit metadata
+ GpgSign []string // GPG Key ID with which to sign the commit (if you have GPGME - GNU Privacy Guard Made Easy)
+ GpgHomedir string // GPG home directory to use when looking for keyrings (if you have GPGME - GNU Privacy Guard Made Easy)
+ Timestamp time.Time // Override the timestamp of the commit
+ Orphan bool // Commit does not belong to a branch
+ Fsync bool // Specify whether fsync should be used or not. Default to true
+}
+
+// Initializes a commitOptions struct and sets default values
+func NewCommitOptions() commitOptions {
+ var co commitOptions
+ co.OwnerUID = -1
+ co.OwnerGID = -1
+ co.Fsync = true
+ return co
+}
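+
+// A minimal commit sketch (editor's illustration; the path and branch are
+// hypothetical):
+//
+//	opts := NewCommitOptions()
+//	opts.Subject = "initial import"
+//	checksum, err := repo.Commit("/tmp/rootfs", "exampleos/x86_64/standard", opts)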
+
+type OstreeRepoTransactionStats struct {
+ metadata_objects_total int32
+ metadata_objects_written int32
+ content_objects_total int32
+ content_objects_written int32
+ content_bytes_written uint64
+}
+
+func (repo *Repo) PrepareTransaction() (bool, error) {
+ var cerr *C.GError = nil
+ var resume C.gboolean
+
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_prepare_transaction(repo.native(), &resume, nil, &cerr)))
+ if !r {
+ return false, generateError(cerr)
+ }
+ return glib.GoBool(glib.GBoolean(resume)), nil
+}
+
+func (repo *Repo) CommitTransaction() (*OstreeRepoTransactionStats, error) {
+ var cerr *C.GError = nil
+ var stats OstreeRepoTransactionStats = OstreeRepoTransactionStats{}
+ statsPtr := (*C.OstreeRepoTransactionStats)(unsafe.Pointer(&stats))
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_commit_transaction(repo.native(), statsPtr, nil, &cerr)))
+ if !r {
+ return nil, generateError(cerr)
+ }
+ return &stats, nil
+}
+
+func (repo *Repo) TransactionSetRef(remote string, ref string, checksum string) {
+ var cRemote *C.char = nil
+ var cRef *C.char = nil
+ var cChecksum *C.char = nil
+
+ if remote != "" {
+ cRemote = C.CString(remote)
+ }
+ if ref != "" {
+ cRef = C.CString(ref)
+ }
+ if checksum != "" {
+ cChecksum = C.CString(checksum)
+ }
+ C.ostree_repo_transaction_set_ref(repo.native(), cRemote, cRef, cChecksum)
+}
+
+func (repo *Repo) AbortTransaction() error {
+ var cerr *C.GError = nil
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_abort_transaction(repo.native(), nil, &cerr)))
+ if !r {
+ return generateError(cerr)
+ }
+ return nil
+}
+
+func (repo *Repo) RegenerateSummary() error {
+ var cerr *C.GError = nil
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_regenerate_summary(repo.native(), nil, nil, &cerr)))
+ if !r {
+ return generateError(cerr)
+ }
+ return nil
+}
+
+// Commits a directory, specified by commitPath, to an ostree repo as a given branch
+func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) {
+ options = opts
+
+ var err error
+ var modeAdds *glib.GHashTable
+ var skipList *glib.GHashTable
+ var objectToCommit *glib.GFile
+ var skipCommit bool = false
+ var ccommitChecksum *C.char
+ defer C.free(unsafe.Pointer(ccommitChecksum))
+ var flags C.OstreeRepoCommitModifierFlags = 0
+ var filter_data C.CommitFilterData
+
+ var cerr *C.GError
+ defer C.free(unsafe.Pointer(cerr))
+ var metadata *C.GVariant = nil
+ defer func(){
+ if metadata != nil {
+ defer C.g_variant_unref(metadata)
+ }
+ }()
+
+ var detachedMetadata *C.GVariant = nil
+ defer C.free(unsafe.Pointer(detachedMetadata))
+ var mtree *C.OstreeMutableTree
+ defer C.free(unsafe.Pointer(mtree))
+ var root *C.GFile
+ defer C.free(unsafe.Pointer(root))
+ var modifier *C.OstreeRepoCommitModifier
+ defer C.free(unsafe.Pointer(modifier))
+ var cancellable *C.GCancellable
+ defer C.free(unsafe.Pointer(cancellable))
+
+ cpath := C.CString(commitPath)
+ defer C.free(unsafe.Pointer(cpath))
+ csubject := C.CString(options.Subject)
+ defer C.free(unsafe.Pointer(csubject))
+ cbody := C.CString(options.Body)
+ defer C.free(unsafe.Pointer(cbody))
+ cbranch := C.CString(branch)
+ defer C.free(unsafe.Pointer(cbranch))
+ cparent := C.CString(options.Parent)
+ defer C.free(unsafe.Pointer(cparent))
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) {
+ goto out
+ }
+
+ // If the user provided a stat override file
+ if strings.Compare(options.StatOverrideFile, "") != 0 {
+ modeAdds = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full()))
+ if err = parseFileByLine(options.StatOverrideFile, handleStatOverrideLine, modeAdds, cancellable); err != nil {
+ goto out
+ }
+ }
+
+ // If the user provided a skiplist file
+ if strings.Compare(options.SkipListFile, "") != 0 {
+ skipList = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full()))
+ if err = parseFileByLine(options.SkipListFile, handleSkipListline, skipList, cancellable); err != nil {
+ goto out
+ }
+ }
+
+ if options.AddMetadataString != nil {
+ metadata, err = parseKeyValueStrings(options.AddMetadataString)
+ if err != nil {
+ goto out
+ }
+ }
+
+ if options.AddDetachedMetadataString != nil {
+ _, err := parseKeyValueStrings(options.AddDetachedMetadataString)
+ if err != nil {
+ goto out
+ }
+ }
+
+ if strings.Compare(branch, "") == 0 && !options.Orphan {
+ err = errors.New("A branch must be specified or use commitOptions.Orphan")
+ goto out
+ }
+
+ if options.NoXattrs {
+ C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_SKIP_XATTRS)
+ }
+ if options.GenerateSizes {
+ C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES)
+ }
+ if !options.Fsync {
+ C.ostree_repo_set_disable_fsync(repo.native(), C.TRUE)
+ }
+
+ if flags != 0 || options.OwnerUID >= 0 || options.OwnerGID >= 0 || strings.Compare(options.StatOverrideFile, "") != 0 || options.NoXattrs {
+ filter_data.mode_adds = (*C.GHashTable)(modeAdds.Ptr())
+ filter_data.skip_list = (*C.GHashTable)(skipList.Ptr())
+ C._set_owner_uid((C.guint32)(options.OwnerUID))
+ C._set_owner_gid((C.guint32)(options.OwnerGID))
+ modifier = C._ostree_repo_commit_modifier_new_wrapper(flags, C.gpointer(&filter_data), nil)
+ }
+
+ if strings.Compare(options.Parent, "") != 0 {
+ if strings.Compare(options.Parent, "none") == 0 {
+ options.Parent = ""
+ }
+ } else if !options.Orphan {
+ cerr = nil
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.TRUE, &cparent, &cerr))) {
+ goto out
+ }
+ }
+
+ if options.LinkCheckoutSpeedup && !glib.GoBool(glib.GBoolean(C.ostree_repo_scan_hardlinks(repo.native(), cancellable, &cerr))) {
+ goto out
+ }
+
+ mtree = C.ostree_mutable_tree_new()
+
+ if len(commitPath) == 0 && (len(options.Tree) == 0 || len(options.Tree[0]) == 0) {
+ currentDir := (*C.char)(C.g_get_current_dir())
+ objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(currentDir)))
+ C.g_free(C.gpointer(currentDir))
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
+ goto out
+ }
+ } else if len(options.Tree) != 0 {
+ var eq int = -1
+ cerr = nil
+ for tree := range options.Tree {
+ eq = strings.Index(options.Tree[tree], "=")
+ if eq == -1 {
+ C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(options.Tree[tree]))
+ goto out
+ }
+ treeType := options.Tree[tree][:eq]
+ treeVal := options.Tree[tree][eq+1:]
+
+ if strings.Compare(treeType, "dir") == 0 {
+ objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal))))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
+ goto out
+ }
+ } else if strings.Compare(treeType, "tar") == 0 {
+ objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal))))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_archive_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, (C.gboolean)(glib.GBool(opts.TarAutoCreateParents)), cancellable, &cerr))) {
+ fmt.Println("error 1")
+ goto out
+ }
+ } else if strings.Compare(treeType, "ref") == 0 {
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), C.CString(treeVal), (**C.GFile)(objectToCommit.Ptr()), nil, cancellable, &cerr))) {
+ goto out
+ }
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
+ goto out
+ }
+ } else {
+ C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(treeVal))
+ goto out
+ }
+ }
+ } else {
+ objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(cpath)))
+ cerr = nil
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
+ goto out
+ }
+ }
+
+ if modeAdds != nil && C.g_hash_table_size((*C.GHashTable)(modeAdds.Ptr())) > 0 {
+ var hashIter *C.GHashTableIter
+
+ var key, value C.gpointer
+
+ C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(modeAdds.Ptr()))
+
+ for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) {
+ C._g_printerr_onearg(C.CString("Unmatched StatOverride path: "), C._gptr_to_str(key))
+ }
+ err = errors.New("Unmatched StatOverride paths")
+ C.free(unsafe.Pointer(hashIter))
+ C.free(unsafe.Pointer(key))
+ C.free(unsafe.Pointer(value))
+ goto out
+ }
+
+ if skipList != nil && C.g_hash_table_size((*C.GHashTable)(skipList.Ptr())) > 0 {
+ var hashIter *C.GHashTableIter
+ var key, value C.gpointer
+
+ C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(skipList.Ptr()))
+
+ for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) {
+ C._g_printerr_onearg(C.CString("Unmatched SkipList path: "), C._gptr_to_str(key))
+ }
+ err = errors.New("Unmatched SkipList paths")
+ C.free(unsafe.Pointer(hashIter))
+ C.free(unsafe.Pointer(key))
+ C.free(unsafe.Pointer(value))
+ goto out
+ }
+
+ cerr = nil
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_mtree(repo.native(), mtree, &root, cancellable, &cerr))) {
+ goto out
+ }
+
+ if options.SkipIfUnchanged && strings.Compare(options.Parent, "") != 0 {
+ var parentRoot *C.GFile
+
+ cerr = nil
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), cparent, &parentRoot, nil, cancellable, &cerr))) {
+ C.free(unsafe.Pointer(parentRoot))
+ goto out
+ }
+
+ if glib.GoBool(glib.GBoolean(C.g_file_equal(root, parentRoot))) {
+ skipCommit = true
+ }
+ C.free(unsafe.Pointer(parentRoot))
+ }
+
+ if !skipCommit {
+ var timestamp C.guint64
+
+ if options.Timestamp.IsZero() {
+ var now *C.GDateTime = C.g_date_time_new_now_utc()
+ timestamp = (C.guint64)(C.g_date_time_to_unix(now))
+ C.g_date_time_unref(now)
+
+ cerr = nil
+ ret := C.ostree_repo_write_commit(repo.native(), cparent, csubject, cbody, metadata, C._ostree_repo_file(root), &ccommitChecksum, cancellable, &cerr)
+ if !glib.GoBool(glib.GBoolean(ret)) {
+ goto out
+ }
+ } else {
+ timestamp = (C.guint64)(options.Timestamp.Unix())
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_commit_with_time(repo.native(), cparent, csubject, cbody,
+ metadata, C._ostree_repo_file(root), timestamp, &ccommitChecksum, cancellable, &cerr))) {
+ goto out
+ }
+ }
+
+ if detachedMetadata != nil {
+ C.ostree_repo_write_commit_detached_metadata(repo.native(), ccommitChecksum, detachedMetadata, cancellable, &cerr)
+ }
+
+ if len(options.GpgSign) != 0 {
+ for key := range options.GpgSign {
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_sign_commit(repo.native(), (*C.gchar)(ccommitChecksum), (*C.gchar)(C.CString(options.GpgSign[key])), (*C.gchar)(C.CString(options.GpgHomedir)), cancellable, &cerr))) {
+ goto out
+ }
+ }
+ }
+
+ if strings.Compare(branch, "") != 0 {
+ C.ostree_repo_transaction_set_ref(repo.native(), nil, cbranch, ccommitChecksum)
+ } else if !options.Orphan {
+ goto out
+ } else {
+ // TODO: Looks like I forgot to implement this.
+ }
+ } else {
+ ccommitChecksum = C.CString(options.Parent)
+ }
+
+ return C.GoString(ccommitChecksum), nil
+out:
+ if repo.native() != nil {
+ C.ostree_repo_abort_transaction(repo.native(), cancellable, nil)
+ //C.free(unsafe.Pointer(repo.native()))
+ }
+ if modifier != nil {
+ C.ostree_repo_commit_modifier_unref(modifier)
+ }
+ if err != nil {
+ return "", err
+ }
+ return "", generateError(cerr)
+}
+
+// Parse an array of key value pairs of the format KEY=VALUE and add them to a GVariant
+func parseKeyValueStrings(pairs []string) (*C.GVariant, error) {
+ builder := C.g_variant_builder_new(C._g_variant_type(C.CString("a{sv}")))
+ defer C.g_variant_builder_unref(builder)
+
+ for iter := range pairs {
+ index := strings.Index(pairs[iter], "=")
+ if index <= 0 {
+ return nil, fmt.Errorf("Missing '=' in KEY=VALUE metadata '%s'", pairs[iter])
+ }
+
+ key := C.CString(pairs[iter][:index])
+ value := C.CString(pairs[iter][index+1:])
+
+ valueVariant := C.g_variant_new_string((*C.gchar)(value))
+
+ C._g_variant_builder_add_twoargs(builder, C.CString("{sv}"), key, valueVariant)
+ }
+
+ metadata := C.g_variant_builder_end(builder)
+ return C.g_variant_ref_sink(metadata), nil
+}
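+
+// Example (sketch; the keys and values below are illustrative, not required
+// names):
+//
+//	metadata, err := parseKeyValueStrings([]string{"version=1.0", "arch=x86_64"})
+//	if err == nil {
+//		defer C.g_variant_unref(metadata)
+//	}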
+
+// Parse a file line by line and handle each line with the given handleLineFunc
+func parseFileByLine(path string, fn handleLineFunc, table *glib.GHashTable, cancellable *C.GCancellable) error {
+ var contents *C.char
+ var file *glib.GFile
+ var lines []string
+ var gerr = glib.NewGError()
+ cerr := (*C.GError)(gerr.Ptr())
+
+ file = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(path))))
+ if !glib.GoBool(glib.GBoolean(C.g_file_load_contents((*C.GFile)(file.Ptr()), cancellable, &contents, nil, nil, &cerr))) {
+ return generateError(cerr)
+ }
+
+ lines = strings.Split(C.GoString(contents), "\n")
+ for line := range lines {
+ if strings.Compare(lines[line], "") == 0 {
+ continue
+ }
+
+ if err := fn(lines[line], table); err != nil {
+ return generateError(cerr)
+ }
+ }
+ return nil
+}
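+
+// Example (sketch; the path is hypothetical): load a skiplist file into a
+// caller-provided hash table.
+//
+//	err := parseFileByLine("/etc/example/skip-list", handleSkipListline, table, cancellable)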
+
+// Handle an individual line from a Statoverride file
+func handleStatOverrideLine(line string, table *glib.GHashTable) error {
+ var space int
+ var modeAdd C.guint
+
+ if space = strings.IndexRune(line, ' '); space == -1 {
+ return errors.New("Malformed StatOverrideFile (no space found)")
+ }
+
+ modeAdd = (C.guint)(C.g_ascii_strtod((*C.gchar)(C.CString(line)), nil))
+ C.g_hash_table_insert((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line[space+1:])))), C._guint_to_pointer(modeAdd))
+
+ return nil
+}
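+
+// A statoverride line has the form "<mode bits to add> <path>". For example
+// (illustrative), the line "2048 /usr/bin/foo" adds the setuid bit (2048
+// decimal == 04000 octal) to /usr/bin/foo.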
+
+// Handle an individual line from a Skiplist file
+func handleSkipListline(line string, table *glib.GHashTable) error {
+	C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line)))))
+
+ return nil
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
new file mode 100644
index 000000000..c1ca2dc7e
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
@@ -0,0 +1,90 @@
+package otbuiltin
+
+import (
+ "errors"
+ "strings"
+ "unsafe"
+
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+)
+
+// #cgo pkg-config: ostree-1
+// #include <stdlib.h>
+// #include <glib.h>
+// #include <ostree.h>
+// #include "builtin.go.h"
+import "C"
+
+// Declare variables for options
+var initOpts initOptions
+
+// Contains all of the options for initializing an ostree repo
+type initOptions struct {
+ Mode string // either bare, archive-z2, or bare-user
+
+ repoMode C.OstreeRepoMode
+}
+
+// Instantiates and returns an initOptions struct with default values set
+func NewInitOptions() initOptions {
+ io := initOptions{}
+ io.Mode = "bare"
+ io.repoMode = C.OSTREE_REPO_MODE_BARE
+ return io
+}
+
+// Initializes a new ostree repository at the given path. Returns true
+// if the repo exists at the location, regardless of whether it was initialized
+// by the function or if it already existed. Returns an error if the repo could
+// not be initialized
+func Init(path string, options initOptions) (bool, error) {
+ initOpts = options
+ err := parseMode()
+ if err != nil {
+ return false, err
+ }
+
+ // Create a repo struct from the path
+ var cerr *C.GError
+ defer C.free(unsafe.Pointer(cerr))
+ cpath := C.CString(path)
+ defer C.free(unsafe.Pointer(cpath))
+ pathc := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(pathc))
+ crepo := C.ostree_repo_new(pathc)
+
+ // If the repo exists in the filesystem, return an error but set exists to true
+ /* var exists C.gboolean = 0
+ success := glib.GoBool(glib.GBoolean(C.ostree_repo_exists(crepo, &exists, &cerr)))
+ if exists != 0 {
+ err = errors.New("repository already exists")
+ return true, err
+ } else if !success {
+ return false, generateError(cerr)
+ }*/
+
+ cerr = nil
+ created := glib.GoBool(glib.GBoolean(C.ostree_repo_create(crepo, initOpts.repoMode, nil, &cerr)))
+ if !created {
+ errString := generateError(cerr).Error()
+ if strings.Contains(errString, "File exists") {
+ return true, generateError(cerr)
+ }
+ return false, generateError(cerr)
+ }
+ return true, nil
+}
+
+// Converts the mode string to a C.OSTREE_REPO_MODE enum value
+func parseMode() error {
+ if strings.EqualFold(initOpts.Mode, "bare") {
+ initOpts.repoMode = C.OSTREE_REPO_MODE_BARE
+ } else if strings.EqualFold(initOpts.Mode, "bare-user") {
+ initOpts.repoMode = C.OSTREE_REPO_MODE_BARE_USER
+ } else if strings.EqualFold(initOpts.Mode, "archive-z2") {
+ initOpts.repoMode = C.OSTREE_REPO_MODE_ARCHIVE_Z2
+ } else {
+ return errors.New("Invalid option for mode")
+ }
+ return nil
+}
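+
+// Example (sketch; the repo path is hypothetical):
+//
+//	opts := NewInitOptions()
+//	opts.Mode = "archive-z2"
+//	created, err := Init("/var/tmp/example-repo", opts)
+//	// created is true whenever the repo now exists, even if it existed before.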
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
new file mode 100644
index 000000000..2ceea0925
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
@@ -0,0 +1,167 @@
+package otbuiltin
+
+import (
+ "fmt"
+ "strings"
+ "time"
+ "unsafe"
+
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+)
+
+// #cgo pkg-config: ostree-1
+// #include <stdlib.h>
+// #include <glib.h>
+// #include <ostree.h>
+// #include "builtin.go.h"
+import "C"
+
+// Declare variables for options
+var logOpts logOptions
+
+// Layout string (in Go reference-time form) used to format timestamps in the log
+const formatString = "2006-01-02 03:04:05 -0700"
+
+// Struct for the various pieces of data in a log entry
+type LogEntry struct {
+ Checksum []byte
+ Variant []byte
+ Timestamp time.Time
+ Subject string
+ Body string
+}
+
+// Convert the log entry to a string
+func (l LogEntry) String() string {
+ if len(l.Variant) == 0 {
+ return fmt.Sprintf("%s\n%s\n\n\t%s\n\n\t%s\n\n", l.Checksum, l.Timestamp, l.Subject, l.Body)
+ }
+ return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant)
+}
+
+type OstreeDumpFlags uint
+
+const (
+ OSTREE_DUMP_NONE OstreeDumpFlags = 0
+ OSTREE_DUMP_RAW OstreeDumpFlags = 1 << iota
+)
+
+// Contains all of the options for viewing the log of an ostree repo
+type logOptions struct {
+ Raw bool // Show raw variant data
+}
+
+// Instantiates and returns a logOptions struct with default values set
+func NewLogOptions() logOptions {
+ return logOptions{}
+}
+
+// Show the logs of a branch starting with a given commit or ref. Returns a
+// slice of log entries on success and an error otherwise
+func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
+	logOpts = options
+	// attempt to open the repository
+ repo, err := OpenRepo(repoPath)
+ if err != nil {
+ return nil, err
+ }
+
+ cbranch := C.CString(branch)
+ defer C.free(unsafe.Pointer(cbranch))
+ var checksum *C.char
+ defer C.free(unsafe.Pointer(checksum))
+ var flags OstreeDumpFlags = OSTREE_DUMP_NONE
+ var cerr *C.GError
+ defer C.free(unsafe.Pointer(cerr))
+
+ if logOpts.Raw {
+ flags |= OSTREE_DUMP_RAW
+ }
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) {
+ return nil, generateError(cerr)
+ }
+
+ return logCommit(repo, checksum, false, flags)
+}
+
+func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags OstreeDumpFlags) ([]LogEntry, error) {
+ var variant *C.GVariant
+	var parent *C.char
+ var gerr = glib.NewGError()
+ var cerr = (*C.GError)(gerr.Ptr())
+ defer C.free(unsafe.Pointer(cerr))
+ entries := make([]LogEntry, 0, 1)
+ var err error
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) {
+ if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) {
+ return nil, nil
+ }
+ return entries, generateError(cerr)
+ }
+
+ nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
+
+ // get the parent of this commit
+ parent = (*C.char)(C.ostree_commit_get_parent(variant))
+ defer C.free(unsafe.Pointer(parent))
+ if parent != nil {
+ entries, err = logCommit(repo, parent, true, flags)
+ if err != nil {
+ return nil, err
+ }
+ }
+ entries = append(entries, *nextLogEntry)
+ return entries, nil
+}
+
+func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags OstreeDumpFlags) *LogEntry {
+ objLog := new(LogEntry)
+ objLog.Checksum = []byte(C.GoString(checksum))
+
+ if (flags & OSTREE_DUMP_RAW) != 0 {
+ dumpVariant(objLog, variant)
+ return objLog
+ }
+
+ switch objectType {
+ case C.OSTREE_OBJECT_TYPE_COMMIT:
+ dumpCommit(objLog, variant, flags)
+ return objLog
+ default:
+ return objLog
+ }
+}
+
+func dumpVariant(log *LogEntry, variant *C.GVariant) {
+ var byteswappedVariant *C.GVariant
+
+ if C.G_BYTE_ORDER != C.G_BIG_ENDIAN {
+ byteswappedVariant = C.g_variant_byteswap(variant)
+ log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
+ } else {
+		log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(variant, C.TRUE))))
+ }
+}
+
+func dumpCommit(log *LogEntry, variant *C.GVariant, flags OstreeDumpFlags) {
+ var subject, body *C.char
+ defer C.free(unsafe.Pointer(subject))
+ defer C.free(unsafe.Pointer(body))
+ var timestamp C.guint64
+
+ C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timestamp)
+
+	// The commit timestamp is stored big-endian; convert it to host order to get a Unix timestamp
+ timestamp = C._guint64_from_be(timestamp)
+ log.Timestamp = time.Unix((int64)(timestamp), 0)
+
+ if strings.Compare(C.GoString(subject), "") != 0 {
+ log.Subject = C.GoString(subject)
+ }
+
+ if strings.Compare(C.GoString(body), "") != 0 {
+ log.Body = C.GoString(body)
+ }
+}
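+
+// Example (sketch; the repo path and ref are hypothetical):
+//
+//	entries, err := Log("/var/tmp/example-repo", "exampleos/x86_64/stable", NewLogOptions())
+//	if err == nil {
+//		for _, e := range entries {
+//			fmt.Print(e) // LogEntry implements fmt.Stringer
+//		}
+//	}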
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
new file mode 100644
index 000000000..8dfa40a55
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
@@ -0,0 +1,217 @@
+package otbuiltin
+
+import (
+ "bytes"
+ "errors"
+ "strconv"
+ "strings"
+ "time"
+ "unsafe"
+
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+)
+
+// #cgo pkg-config: ostree-1
+// #include <stdlib.h>
+// #include <glib.h>
+// #include <ostree.h>
+// #include "builtin.go.h"
+import "C"
+
+// Declare global variable for options
+var pruneOpts pruneOptions
+
+// Contains all of the options for pruning an ostree repo. Use
+// NewPruneOptions() to initialize
+type pruneOptions struct {
+ NoPrune bool // Only display unreachable objects; don't delete
+ RefsOnly bool // Only compute reachability via refs
+ DeleteCommit string // Specify a commit to delete
+ KeepYoungerThan time.Time // All commits older than this date will be pruned
+	Depth int // Only traverse DEPTH parents for each commit (default: -1 = infinite)
+ StaticDeltasOnly int // Change the behavior of --keep-younger-than and --delete-commit to prune only the static delta files
+}
+
+// Instantiates and returns a pruneOptions struct with default values set
+func NewPruneOptions() pruneOptions {
+ po := new(pruneOptions)
+ po.Depth = -1
+ return *po
+}
+
+// Search for unreachable objects in the repository given by repoPath. Removes the
+// objects unless pruneOptions.NoPrune is specified
+func Prune(repoPath string, options pruneOptions) (string, error) {
+ pruneOpts = options
+ // attempt to open the repository
+ repo, err := OpenRepo(repoPath)
+ if err != nil {
+ return "", err
+ }
+
+	var pruneFlags C.OstreeRepoPruneFlags
+	var numObjectsTotal C.gint
+	var numObjectsPruned C.gint
+	var objSizeTotal C.guint64
+ var gerr = glib.NewGError()
+ var cerr = (*C.GError)(gerr.Ptr())
+ defer C.free(unsafe.Pointer(cerr))
+ var cancellable *glib.GCancellable
+
+ if !pruneOpts.NoPrune && !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) {
+ return "", generateError(cerr)
+ }
+
+ cerr = nil
+ if strings.Compare(pruneOpts.DeleteCommit, "") != 0 {
+ if pruneOpts.NoPrune {
+ return "", errors.New("Cannot specify both pruneOptions.DeleteCommit and pruneOptions.NoPrune")
+ }
+
+ if pruneOpts.StaticDeltasOnly > 0 {
+			if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), C.CString(pruneOpts.DeleteCommit), (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+ return "", generateError(cerr)
+ }
+ } else if err = deleteCommit(repo, pruneOpts.DeleteCommit, cancellable); err != nil {
+ return "", err
+ }
+ }
+
+ if !pruneOpts.KeepYoungerThan.IsZero() {
+ if pruneOpts.NoPrune {
+ return "", errors.New("Cannot specify both pruneOptions.KeepYoungerThan and pruneOptions.NoPrune")
+ }
+
+ if err = pruneCommitsKeepYoungerThanDate(repo, pruneOpts.KeepYoungerThan, cancellable); err != nil {
+ return "", err
+ }
+ }
+
+ if pruneOpts.RefsOnly {
+ pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_REFS_ONLY
+ }
+ if pruneOpts.NoPrune {
+ pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_NO_PRUNE
+ }
+
+	// Run the actual prune now that the flags are assembled; the out-parameters
+	// receive the object counts and the total size reclaimed
+	cerr = nil
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune(repo.native(), pruneFlags, (C.gint)(pruneOpts.Depth), &numObjectsTotal, &numObjectsPruned, &objSizeTotal, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+		return "", generateError(cerr)
+	}
+
+	formattedFreedSize := C.GoString((*C.char)(C.g_format_size_full((C.guint64)(objSizeTotal), 0)))
+
+ var buffer bytes.Buffer
+
+ buffer.WriteString("Total objects: ")
+	buffer.WriteString(strconv.Itoa((int)(numObjectsTotal)))
+ if numObjectsPruned == 0 {
+ buffer.WriteString("\nNo unreachable objects")
+ } else if pruneOpts.NoPrune {
+ buffer.WriteString("\nWould delete: ")
+		buffer.WriteString(strconv.Itoa((int)(numObjectsPruned)))
+ buffer.WriteString(" objects, freeing ")
+ buffer.WriteString(formattedFreedSize)
+ } else {
+ buffer.WriteString("\nDeleted ")
+		buffer.WriteString(strconv.Itoa((int)(numObjectsPruned)))
+ buffer.WriteString(" objects, ")
+ buffer.WriteString(formattedFreedSize)
+ buffer.WriteString(" freed")
+ }
+
+ return buffer.String(), nil
+}
+
+// Delete an unreachable commit from the repo
+func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancellable) error {
+ var refs *glib.GHashTable
+ var hashIter glib.GHashTableIter
+ var hashkey, hashvalue C.gpointer
+ var gerr = glib.NewGError()
+ var cerr = (*C.GError)(gerr.Ptr())
+ defer C.free(unsafe.Pointer(cerr))
+
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_refs(repo.native(), nil, (**C.GHashTable)(refs.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+ return generateError(cerr)
+ }
+
+ C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(refs.Ptr()))
+ for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &hashkey, &hashvalue) != 0 {
+ var ref string = C.GoString((*C.char)(hashkey))
+ var commit string = C.GoString((*C.char)(hashvalue))
+ if strings.Compare(commitToDelete, commit) == 0 {
+ var buffer bytes.Buffer
+ buffer.WriteString("Commit ")
+ buffer.WriteString(commitToDelete)
+ buffer.WriteString(" is referenced by ")
+ buffer.WriteString(ref)
+ return errors.New(buffer.String())
+ }
+ }
+
+ if err := enableTombstoneCommits(repo); err != nil {
+ return err
+ }
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, C.CString(commitToDelete), (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+ return generateError(cerr)
+ }
+
+ return nil
+}
+
+// Prune commits but keep any younger than the given date regardless of whether they
+// are reachable
+func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *glib.GCancellable) error {
+ var objects *glib.GHashTable
+ defer C.free(unsafe.Pointer(objects))
+ var hashIter glib.GHashTableIter
+ var key, value C.gpointer
+ defer C.free(unsafe.Pointer(key))
+ defer C.free(unsafe.Pointer(value))
+ var gerr = glib.NewGError()
+ var cerr = (*C.GError)(gerr.Ptr())
+ defer C.free(unsafe.Pointer(cerr))
+
+ if err := enableTombstoneCommits(repo); err != nil {
+ return err
+ }
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_objects(repo.native(), C.OSTREE_REPO_LIST_OBJECTS_ALL, (**C.GHashTable)(objects.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+ return generateError(cerr)
+ }
+
+ C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(objects.Ptr()))
+ for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &key, &value) != 0 {
+ var serializedKey *glib.GVariant
+ defer C.free(unsafe.Pointer(serializedKey))
+ var checksum *C.char
+ defer C.free(unsafe.Pointer(checksum))
+ var objType C.OstreeObjectType
+ var commitTimestamp uint64
+ var commit *glib.GVariant = nil
+
+ C.ostree_object_name_deserialize((*C.GVariant)(serializedKey.Ptr()), &checksum, &objType)
+
+ if objType != C.OSTREE_OBJECT_TYPE_COMMIT {
+ continue
+ }
+
+ cerr = nil
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (**C.GVariant)(commit.Ptr()), &cerr))) {
+ return generateError(cerr)
+ }
+
+ commitTimestamp = (uint64)(C.ostree_commit_get_timestamp((*C.GVariant)(commit.Ptr())))
+ if commitTimestamp < (uint64)(date.Unix()) {
+ cerr = nil
+ if pruneOpts.StaticDeltasOnly != 0 {
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+ return generateError(cerr)
+ }
+ } else {
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+ return generateError(cerr)
+ }
+ }
+ }
+ }
+
+ return nil
+}
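+
+// Example (sketch; the repo path and cutoff are illustrative):
+//
+//	opts := NewPruneOptions()
+//	opts.KeepYoungerThan = time.Now().AddDate(0, -6, 0) // keep the last six months
+//	summary, err := Prune("/var/tmp/example-repo", opts)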
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
new file mode 100644
index 000000000..d43ea07c7
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
@@ -0,0 +1 @@
+package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 000000000..835ba3e75
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 000000000..273db3c98
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,52 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
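+
+Putting `Wrap` and `Cause` together, a minimal sketch (the file path and helper name are illustrative):
+```go
+func readConfig(path string) ([]byte, error) {
+	buf, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, errors.Wrap(err, "read config failed")
+	}
+	return buf, nil
+}
+
+// _, err := readConfig("/no/such/file")
+// fmt.Println(err)               // "read config failed: open /no/such/file: ..."
+// fmt.Println(errors.Cause(err)) // the underlying *os.PathError
+```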
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Contributing
+
+We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+
+Before proposing a change, please discuss your change by raising an issue.
+
+## Licence
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 000000000..842ee8045
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,269 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// and the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required the errors.WithStack and errors.WithMessage
+// functions destructure errors.Wrap into its component operations of annotating
+// an error with a stack trace and a message, respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error which does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// causer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface.
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// Where errors.StackTrace is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d", f)
+// }
+// }
+//
+// stackTracer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 000000000..6b1f2891a
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,178 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s path of source file relative to the compile time GOPATH
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ pc := f.pc()
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ io.WriteString(s, "unknown")
+ } else {
+ file, _ := fn.FileLine(pc)
+ fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+ }
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ fmt.Fprintf(s, "%d", f.line())
+ case 'n':
+ name := runtime.FuncForPC(f.pc()).Name()
+ io.WriteString(s, funcname(name))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ fmt.Fprintf(s, "\n%+v", f)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ fmt.Fprintf(s, "%v", []Frame(st))
+ }
+ case 's':
+ fmt.Fprintf(s, "%s", []Frame(st))
+ }
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
+
+func trimGOPATH(name, file string) string {
+ // Here we want to get the source file path relative to the compile time
+ // GOPATH. As of Go 1.6.x there is no direct way to know the compiled
+ // GOPATH at runtime, but we can infer the number of path segments in the
+ // GOPATH. We note that fn.Name() returns the function name qualified by
+ // the import path, which does not include the GOPATH. Thus we can trim
+ // segments from the beginning of the file path until the number of path
+ // separators remaining is one more than the number of path separators in
+ // the function name. For example, given:
+ //
+ // GOPATH /home/user
+ // file /home/user/src/pkg/sub/file.go
+ // fn.Name() pkg/sub.Type.Method
+ //
+ // We want to produce:
+ //
+ // pkg/sub/file.go
+ //
+ // From this we can easily see that fn.Name() has one less path separator
+ // than our desired output. We count separators from the end of the file
+// path until we find two more than in the function name and then move
+ // one character forward to preserve the initial path segment without a
+ // leading separator.
+ const sep = "/"
+ goal := strings.Count(name, sep) + 2
+ i := len(file)
+ for n := 0; n < goal; n++ {
+ i = strings.LastIndex(file[:i], sep)
+ if i == -1 {
+ // not enough separators found, set i so that the slice expression
+ // below leaves file unmodified
+ i = -len(sep)
+ break
+ }
+ }
+ // get back to 0 or trim the leading separator
+ file = file[i+len(sep):]
+ return file
+}
diff --git a/vendor/github.com/pquerna/ffjson/LICENSE b/vendor/github.com/pquerna/ffjson/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/pquerna/ffjson/NOTICE b/vendor/github.com/pquerna/ffjson/NOTICE
new file mode 100644
index 000000000..405a49618
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/NOTICE
@@ -0,0 +1,8 @@
+ffjson
+Copyright (c) 2014, Paul Querna
+
+This product includes software developed by
+Paul Querna (http://paul.querna.org/).
+
+Portions of this software were developed as
+part of Go, Copyright (c) 2012 The Go Authors. \ No newline at end of file
diff --git a/vendor/github.com/pquerna/ffjson/README.md b/vendor/github.com/pquerna/ffjson/README.md
new file mode 100644
index 000000000..30b239f10
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/README.md
@@ -0,0 +1,232 @@
+# ffjson: faster JSON for Go
+
+[![Build Status](https://travis-ci.org/pquerna/ffjson.svg?branch=master)](https://travis-ci.org/pquerna/ffjson)
+
+`ffjson` generates static `MarshalJSON` and `UnmarshalJSON` functions for structures in Go. The generated functions reduce the reliance upon runtime reflection to do serialization and are generally 2 to 3 times faster. In cases where `ffjson` doesn't understand a Type involved, it falls back to `encoding/json`, meaning it is a safe drop in replacement. By using `ffjson` your JSON serialization just gets faster with no additional code changes.
+
+When you change your `struct`, you will need to run `ffjson` again (or make it part of your build tools).
+
+## Blog Posts
+
+* 2014-03-31: [First Release and Background](https://journal.paul.querna.org/articles/2014/03/31/ffjson-faster-json-in-go/)
+
+## Getting Started
+
+If `myfile.go` contains the `struct` types you would like to be faster, and assuming `GOPATH` is set to a reasonable value for an existing project (meaning that in this particular example if `myfile.go` is in the `myproject` directory, the project should be under `$GOPATH/src/myproject`), you can just run:
+
+ go get -u github.com/pquerna/ffjson
+ ffjson myfile.go
+ git add myfile_ffjson.go
+
+
+## Performance Status:
+
+* `MarshalJSON` is **2x to 3x** faster than `encoding/json`.
+* `UnmarshalJSON` is **2x to 3x** faster than `encoding/json`.
+
+## Features
+
+* **Unmarshal Support:** Since v0.9, `ffjson` supports Unmarshaling of structures.
+* **Drop in Replacement:** Because `ffjson` implements the interfaces already defined by `encoding/json` the performance enhancements are transparent to users of your structures.
+* **Supports all types:** `ffjson` has native support for most of Go's types -- for any type it doesn't support with fast paths, it falls back to using `encoding/json`. This means all structures should work out of the box. If they don't, [open an issue!](https://github.com/pquerna/ffjson/issues)
+* **ffjson: skip**: If you have a structure you want `ffjson` to ignore, add `ffjson: skip` to the doc string for this structure.
+* **Extensive Tests:** `ffjson` contains an extensive test suite including fuzz'ing against the JSON parser.
+
+
+# Using ffjson
+
+`ffjson` generates code based upon existing `struct` types. For example, `ffjson foo.go` will by default create a new file `foo_ffjson.go` that contains serialization functions for all structs found in `foo.go`.
+
+```
+Usage of ffjson:
+
+ ffjson [options] [input_file]
+
+ffjson generates Go code for optimized JSON serialization.
+
+ -go-cmd="": Path to go command; Useful for `goapp` support.
+ -import-name="": Override import name in case it cannot be detected.
+ -nodecoder: Do not generate decoder functions
+ -noencoder: Do not generate encoder functions
+ -w="": Write generate code to this path instead of ${input}_ffjson.go.
+```
+
+Your code must be in a compilable state for `ffjson` to work. If your code doesn't compile, ffjson will most likely exit with an error.
+
+## Disabling code generation for structs
+
+You might not want all your structs to have JSON code generated. To completely disable generation for a struct, add `ffjson: skip` to the struct comment. For example:
+
+```Go
+// ffjson: skip
+type Foo struct {
+ Bar string
+}
+```
+
+You can also choose not to have either the decoder or encoder generated by including `ffjson: nodecoder` or `ffjson: noencoder` in your comment. For instance, this will only generate the encoder (marshal) part for this struct:
+
+```Go
+// ffjson: nodecoder
+type Foo struct {
+ Bar string
+}
+```
+
+You can also disable encoders/decoders entirely for a file by using the `-noencoder`/`-nodecoder` commandline flags.
+
+## Using ffjson with `go generate`
+
+`ffjson` is a great fit with `go generate`. It allows you to specify the ffjson command inside your individual go files and run them all at once. This way you don't have to maintain a separate build file with the files you need to generate.
+
+Add this comment anywhere inside your go files:
+
+```Go
+//go:generate ffjson $GOFILE
+```
+
+To re-generate ffjson for all files with the tag in a folder, simply execute:
+
+```sh
+go generate
+```
+
+To generate for the current package and all sub-packages, use:
+
+```sh
+go generate ./...
+```
+This is most of what you need to know about go generate, but you can see more about [go generate on the golang blog](http://blog.golang.org/generate).
+
+## Should I include ffjson files in VCS?
+
+That question is really up to you. If you don't, you will have a more complex build process. If you do, you have to keep the generated files updated if you change the content of your structs.
+
+That said, ffjson operates deterministically, so it will generate the same code every time it runs; unless your code changes, the generated content should not change. Note, however, that this only holds if you are using the same ffjson version, so if several people are working on a project, you might need to synchronize your ffjson versions.
+
+## Performance pitfalls
+
+`ffjson` has a few cases where it will fall back to using the runtime encoder/decoder. Notable cases are:
+
+* Interface struct members. Since their concrete types cannot be known before runtime, ffjson has to fall back to the reflect-based coder (see the sketch after this list).
+* Structs with custom marshal/unmarshal functions.
+* Maps with a complex value type. Simple types like `map[string]int` are fine, though.
+* Inline struct definitions such as `type A struct{ B struct{ X int } }` are handled by the encoder, but currently fall back in the decoder.
+* Slices of slices and slices of maps currently fall back when generating the decoder.
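+
+As a sketch of the first pitfall, in a struct like the following (hypothetical example) the `Payload` field forces the reflect-based fallback, while `Name` still gets the generated fast path:
+
+```Go
+type Event struct {
+	Name    string      // fast path: concrete type known at generation time
+	Payload interface{} // fallback: concrete type only known at runtime
+}
+```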
+
+## Reducing Garbage Collection
+
+`ffjson` already does a lot to reduce garbage generation. However, every pass through `json.Marshal` hands you back a new byte slice. On very high-throughput servers this can lead to increased GC pressure.
+
+### Tip 1: Use ffjson.Marshal() / ffjson.Unmarshal()
+
+This is probably the easiest optimization. Instead of going through `encoding/json`, you can call `ffjson` directly. This skips the validation that `encoding/json` performs on the JSON it receives back from a struct's marshal functions.
+
+```Go
+ import "github.com/pquerna/ffjson/ffjson"
+
+ // BEFORE:
+ buf, err := json.Marshal(&item)
+
+ // AFTER:
+ buf, err := ffjson.Marshal(&item)
+```
+This simple change is likely to double the speed of your encoding/decoding.
+
+
+[![GoDoc][1]][2]
+[1]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
+[2]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Marshal
+
+### Tip 2: Pooling the buffer
+
+On servers where you have a lot of concurrent encoding going on, you can hand back the byte buffer you get from `ffjson.Marshal` once you are done using it. An example could look like this:
+```Go
+import "github.com/pquerna/ffjson/ffjson"
+
+func Encode(item interface{}, out io.Writer) {
+	// Encode
+	buf, err := ffjson.Marshal(&item)
+	if err != nil {
+		return
+	}
+
+	// Write the buffer
+	_, _ = out.Write(buf)
+
+	// We no longer need the buffer, so we return it to the pool.
+	ffjson.Pool(buf)
+}
+```
+Note that the buffers you put back in the pool can still be reclaimed by the garbage collector, so pooling buffers will not cause your program's memory use to grow unbounded.
+
+[![GoDoc][3]][4]
+[3]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
+[4]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Pool
+
+### Tip 3: Creating an Encoder
+
+There might be cases where you need to encode many objects at once: a server taking a backup, a job writing a lot of entries to files, etc.
+
+To do this, there is an interface similar to `encoding/json` that allows you to create a reusable encoder. Here is an example where we want to encode an array of the `Item` type, with a comma between entries:
+```Go
+import "github.com/pquerna/ffjson/ffjson"
+
+func EncodeItems(items []Item, out io.Writer) {
+ // We create an encoder.
+ enc := ffjson.NewEncoder(out)
+
+	for i, item := range items {
+		// Encode into the buffer
+		err := enc.Encode(&item)
+		if err != nil {
+			return
+		}
+
+		// The content has been written to out, so we can write to it as well.
+		if i != len(items)-1 {
+			_, _ = out.Write([]byte{','})
+		}
+	}
+}
+```
+
+
+Documentation: [![GoDoc][5]][6]
+[5]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
+[6]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Encoder
+
+### Tip 4: Avoid interfaces
+
+We don't want to dictate how you structure your data, but interface-typed fields will make ffjson fall back to the runtime Go encoder for them. When ffjson has to do this, it may even become slower than using `json.Marshal` directly.
+
+To see where that happens, search the generated `_ffjson.go` file for the text `Falling back`, which will indicate where ffjson is unable to generate code for your data structure.
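+
+For example (a sketch, assuming the generated files live in the current directory):
+
+```sh
+grep -n "Falling back" *_ffjson.go
+```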
+
+### Tip 5: `ffjson` all the things!
+
+You should not only create ffjson code for your main struct, but also for any structs that are included/used in your JSON code.
+
+So if your struct looks like this:
+```Go
+type Foo struct {
+ V Bar
+}
+```
+You should also make sure that code is generated for `Bar` if it is placed in another file. Also note that currently you must do this in order, since generating code for `Foo` will check whether code for `Bar` already exists. This is only an issue if `Foo` and `Bar` are placed in different files. We are currently working on allowing simultaneous generation of an entire package.
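+
+Assuming `Foo` lives in `foo.go` and `Bar` in `bar.go` (hypothetical file names), one way to respect that ordering is:
+
+```sh
+# Generate Bar's code first, since Foo references Bar.
+ffjson bar.go
+ffjson foo.go
+```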
+
+
+## Improvements, bugs, adding features, and taking ffjson new directions!
+
+Please [open issues in Github](https://github.com/pquerna/ffjson/issues) for ideas, bugs, and general thoughts. Pull requests are of course preferred :)
+
+## Similar projects
+
+* [go-codec](https://github.com/ugorji/go/tree/master/codec#readme). A very good project that also allows streaming encoding/decoding, but it requires you to call into the library explicitly.
+* [megajson](https://github.com/benbjohnson/megajson). This has limited support, and development seems to have almost stopped at the time of writing.
+
+# Credits
+
+`ffjson` has received significant contributions from:
+
+* [Klaus Post](https://github.com/klauspost)
+* [Paul Querna](https://github.com/pquerna)
+* [Erik Dubbelboer](https://github.com/erikdubbelboer)
+
+## License
+
+`ffjson` is licensed under the [Apache License, Version 2.0](./LICENSE).
+
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go
new file mode 100644
index 000000000..7f63a8582
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go
@@ -0,0 +1,421 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+// Simple byte buffer for marshaling data.
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+type grower interface {
+ Grow(n int)
+}
+
+type truncater interface {
+ Truncate(n int)
+ Reset()
+}
+
+type bytesReader interface {
+ Bytes() []byte
+ String() string
+}
+
+type runeWriter interface {
+ WriteRune(r rune) (n int, err error)
+}
+
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type lener interface {
+ Len() int
+}
+
+type rewinder interface {
+ Rewind(n int) (err error)
+}
+
+type encoder interface {
+ Encode(interface{}) error
+}
+
+// TODO(pquerna): continue to reduce these interfaces
+
+type EncodingBuffer interface {
+ io.Writer
+ io.WriterTo
+ io.ByteWriter
+ stringWriter
+ truncater
+ grower
+ rewinder
+ encoder
+}
+
+type DecodingBuffer interface {
+ io.ReadWriter
+ io.ByteWriter
+ stringWriter
+ runeWriter
+ truncater
+ grower
+ bytesReader
+ lener
+}
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
+ encoder *json.Encoder
+ skipTrailingByte bool
+}
+
+// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
+var ErrTooLarge = errors.New("fflib.v1.Buffer: too large")
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ if n == 0 {
+ b.off = 0
+ b.buf = b.buf[0:0]
+ } else {
+ b.buf = b.buf[0 : b.off+n]
+ }
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ // If we have no buffer, get one from the pool
+ m := b.Len()
+ if m == 0 {
+ if b.buf == nil {
+ b.buf = makeSlice(2 * n)
+ b.off = 0
+ } else if b.off != 0 {
+ // If buffer is empty, reset to recover space.
+ b.Truncate(0)
+ }
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ buf = b.buf[:m]
+ } else {
+ // not enough space anywhere
+ buf = makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
+ Pool(b.buf)
+ b.buf = buf
+ }
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("bytes.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ if b.skipTrailingByte {
+ p = p[:len(p)-1]
+ }
+ m := b.grow(len(p))
+ return copy(b.buf[m:], p), nil
+}
+
+// WriteString appends the contents of s to the buffer, growing the buffer as
+// needed. The return value n is the length of s; err is always nil. If the
+// buffer becomes too large, WriteString will panic with ErrTooLarge.
+func (b *Buffer) WriteString(s string) (n int, err error) {
+ m := b.grow(len(s))
+ return copy(b.buf[m:], s), nil
+}
+
+// minRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least minRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const minRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if free := cap(b.buf) - len(b.buf); free < minRead {
+ // not enough space at end
+ newBuf := b.buf
+ if b.off+free < minRead {
+ // not enough space using beginning of buffer;
+ // double buffer capacity
+ newBuf = makeSlice(2*cap(b.buf) + minRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ Pool(b.buf)
+ b.buf = newBuf[:len(b.buf)-b.off]
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == io.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ if b.off < len(b.buf) {
+ nBytes := b.Len()
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("bytes.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
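+// Rewind discards the last n bytes written to the buffer.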
+func (b *Buffer) Rewind(n int) error {
+ b.buf = b.buf[:len(b.buf)-n]
+ return nil
+}
+
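+// Encode JSON-encodes v and appends it to the buffer, dropping the
+// trailing newline that json.Encoder.Encode normally emits.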
+func (b *Buffer) Encode(v interface{}) error {
+ if b.encoder == nil {
+ b.encoder = json.NewEncoder(b)
+ }
+ b.skipTrailingByte = true
+ err := b.encoder.Encode(v)
+ b.skipTrailingByte = false
+ return err
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode code point r to the
+// buffer, returning its length and an error, which is always nil but is
+// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
+// if it becomes too large, WriteRune will panic with ErrTooLarge.
+func (b *Buffer) WriteRune(r rune) (n int, err error) {
+ if r < utf8.RuneSelf {
+ b.WriteByte(byte(r))
+ return 1, nil
+ }
+ n = utf8.EncodeRune(b.runeBytes[0:], r)
+ b.Write(b.runeBytes[0:n])
+ return n, nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ if len(p) == 0 {
+ return
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, io.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ return c, nil
+}
+
+// ReadRune reads and returns the next UTF-8-encoded
+// Unicode code point from the buffer.
+// If no bytes are available, the error returned is io.EOF.
+// If the bytes are an erroneous UTF-8 encoding, it
+// consumes one byte and returns U+FFFD, 1.
+func (b *Buffer) ReadRune() (r rune, size int, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, 0, io.EOF
+ }
+ c := b.buf[b.off]
+ if c < utf8.RuneSelf {
+ b.off++
+ return rune(c), 1, nil
+ }
+ r, n := utf8.DecodeRune(b.buf[b.off:])
+ b.off += n
+ return r, n, nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := bytes.IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ return line, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end
+// in delim.
+func (b *Buffer) ReadString(delim byte) (line string, err error) {
+ slice, err := b.readSlice(delim)
+ return string(slice), err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
+
+// NewBufferString creates and initializes a new Buffer using string s as its
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBufferString(s string) *Buffer {
+ return &Buffer{buf: []byte(s)}
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go
new file mode 100644
index 000000000..b84af6ff9
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go
@@ -0,0 +1,11 @@
+// +build !go1.3
+
+package v1
+
+// Stub version of buffer_pool.go for Go 1.2, which doesn't have sync.Pool.
+
+func Pool(b []byte) {}
+
+func makeSlice(n int) []byte {
+ return make([]byte, n)
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go
new file mode 100644
index 000000000..a021c57cf
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.3
+
+package v1
+
+// Allocation pools for Buffers.
+
+import "sync"
+
+var pools [14]sync.Pool
+var pool64 *sync.Pool
+
+func init() {
+ var i uint
+ // TODO(pquerna): add science here around actual pool sizes.
+ for i = 6; i < 20; i++ {
+ n := 1 << i
+ pools[poolNum(n)].New = func() interface{} { return make([]byte, 0, n) }
+ }
+ pool64 = &pools[0]
+}
+
+// This returns the pool number that will give a buffer of
+// at least 'i' bytes.
+func poolNum(i int) int {
+ // TODO(pquerna): convert to log2 w/ bsr asm instruction:
+ // <https://groups.google.com/forum/#!topic/golang-nuts/uAb5J1_y7ns>
+ if i <= 64 {
+ return 0
+ } else if i <= 128 {
+ return 1
+ } else if i <= 256 {
+ return 2
+ } else if i <= 512 {
+ return 3
+ } else if i <= 1024 {
+ return 4
+ } else if i <= 2048 {
+ return 5
+ } else if i <= 4096 {
+ return 6
+ } else if i <= 8192 {
+ return 7
+ } else if i <= 16384 {
+ return 8
+ } else if i <= 32768 {
+ return 9
+ } else if i <= 65536 {
+ return 10
+ } else if i <= 131072 {
+ return 11
+ } else if i <= 262144 {
+ return 12
+ } else if i <= 524288 {
+ return 13
+ } else {
+ return -1
+ }
+}
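+
+// For example, poolNum(100) returns 1, the bucket of 128-byte buffers:
+// the smallest bucket whose top size covers a request for at least 100
+// bytes. Sizes above 524288 bytes return -1, meaning no pool applies.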
+
+// Pool sends a buffer back to the pool so it can be reused by other
+// instances. You must no longer use the contents of the buffer after
+// calling Pool, since it may be reused by other goroutines.
+func Pool(b []byte) {
+ if b == nil {
+ return
+ }
+ c := cap(b)
+
+ // Our smallest buffer is 64 bytes, so we discard smaller buffers.
+ if c < 64 {
+ return
+ }
+
+	// We need to put the incoming buffer into the NEXT SMALLER bucket,
+	// since a bucket guarantees AT LEAST the number of bytes of its top
+	// size. That is the reason for dividing the cap by 2, so it lands in
+	// the next bucket down.
+	// We add 2 to avoid rounding down if the size is exactly a power of 2.
+ pn := poolNum((c + 2) >> 1)
+ if pn != -1 {
+ pools[pn].Put(b[0:0])
+ }
+ // if we didn't have a slot for this []byte, we just drop it and let the GC
+ // take care of it.
+}
+
+// makeSlice allocates a slice of size n -- it will attempt to use a pooled
+// instance whenever possible.
+func makeSlice(n int) []byte {
+ if n <= 64 {
+ return pool64.Get().([]byte)[0:n]
+ }
+
+ pn := poolNum(n)
+
+ if pn != -1 {
+ return pools[pn].Get().([]byte)[0:n]
+ } else {
+ return make([]byte, n)
+ }
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go b/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
new file mode 100644
index 000000000..08477409a
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
@@ -0,0 +1,88 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are from Go stdlib's strconv/atoi.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+import (
+ "github.com/pquerna/ffjson/fflib/v1/internal"
+)
+
+func ParseFloat(s []byte, bitSize int) (f float64, err error) {
+ return internal.ParseFloat(s, bitSize)
+}
+
+// ParseUint is like ParseInt but for unsigned numbers, operating on []byte.
+func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
+ if len(s) == 1 {
+ switch s[0] {
+ case '0':
+ return 0, nil
+ case '1':
+ return 1, nil
+ case '2':
+ return 2, nil
+ case '3':
+ return 3, nil
+ case '4':
+ return 4, nil
+ case '5':
+ return 5, nil
+ case '6':
+ return 6, nil
+ case '7':
+ return 7, nil
+ case '8':
+ return 8, nil
+ case '9':
+ return 9, nil
+ }
+ }
+ return internal.ParseUint(s, base, bitSize)
+}
+
+func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
+ if len(s) == 1 {
+ switch s[0] {
+ case '0':
+ return 0, nil
+ case '1':
+ return 1, nil
+ case '2':
+ return 2, nil
+ case '3':
+ return 3, nil
+ case '4':
+ return 4, nil
+ case '5':
+ return 5, nil
+ case '6':
+ return 6, nil
+ case '7':
+ return 7, nil
+ case '8':
+ return 8, nil
+ case '9':
+ return 9, nil
+ }
+ }
+ return internal.ParseInt(s, base, bitSize)
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go b/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
new file mode 100644
index 000000000..069df7a02
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
@@ -0,0 +1,378 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Multiprecision decimal numbers.
+// For floating-point formatting only; not general purpose.
+// Only operations are assign and (binary) left/right shift.
+// Can do binary floating point in multiprecision decimal precisely
+// because 2 divides 10; cannot do decimal floating point
+// in multiprecision binary precisely.
+
+package v1
+
+type decimal struct {
+ d [800]byte // digits
+ nd int // number of digits used
+ dp int // decimal point
+ neg bool
+ trunc bool // discarded nonzero digits beyond d[:nd]
+}
+
+func (a *decimal) String() string {
+ n := 10 + a.nd
+ if a.dp > 0 {
+ n += a.dp
+ }
+ if a.dp < 0 {
+ n += -a.dp
+ }
+
+ buf := make([]byte, n)
+ w := 0
+ switch {
+ case a.nd == 0:
+ return "0"
+
+ case a.dp <= 0:
+ // zeros fill space between decimal point and digits
+ buf[w] = '0'
+ w++
+ buf[w] = '.'
+ w++
+ w += digitZero(buf[w : w+-a.dp])
+ w += copy(buf[w:], a.d[0:a.nd])
+
+ case a.dp < a.nd:
+ // decimal point in middle of digits
+ w += copy(buf[w:], a.d[0:a.dp])
+ buf[w] = '.'
+ w++
+ w += copy(buf[w:], a.d[a.dp:a.nd])
+
+ default:
+ // zeros fill space between digits and decimal point
+ w += copy(buf[w:], a.d[0:a.nd])
+ w += digitZero(buf[w : w+a.dp-a.nd])
+ }
+ return string(buf[0:w])
+}
+
+func digitZero(dst []byte) int {
+ for i := range dst {
+ dst[i] = '0'
+ }
+ return len(dst)
+}
+
+// trim trailing zeros from number.
+// (They are meaningless; the decimal point is tracked
+// independent of the number of digits.)
+func trim(a *decimal) {
+ for a.nd > 0 && a.d[a.nd-1] == '0' {
+ a.nd--
+ }
+ if a.nd == 0 {
+ a.dp = 0
+ }
+}
+
+// Assign v to a.
+func (a *decimal) Assign(v uint64) {
+ var buf [24]byte
+
+ // Write reversed decimal in buf.
+ n := 0
+ for v > 0 {
+ v1 := v / 10
+ v -= 10 * v1
+ buf[n] = byte(v + '0')
+ n++
+ v = v1
+ }
+
+ // Reverse again to produce forward decimal in a.d.
+ a.nd = 0
+ for n--; n >= 0; n-- {
+ a.d[a.nd] = buf[n]
+ a.nd++
+ }
+ a.dp = a.nd
+ trim(a)
+}
+
+// Maximum shift that we can do in one pass without overflow.
+// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
+const maxShift = 27
+
+// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+ r := 0 // read pointer
+ w := 0 // write pointer
+
+ // Pick up enough leading digits to cover first shift.
+ n := 0
+ for ; n>>k == 0; r++ {
+ if r >= a.nd {
+ if n == 0 {
+ // a == 0; shouldn't get here, but handle anyway.
+ a.nd = 0
+ return
+ }
+ for n>>k == 0 {
+ n = n * 10
+ r++
+ }
+ break
+ }
+ c := int(a.d[r])
+ n = n*10 + c - '0'
+ }
+ a.dp -= r - 1
+
+ // Pick up a digit, put down a digit.
+ for ; r < a.nd; r++ {
+ c := int(a.d[r])
+ dig := n >> k
+ n -= dig << k
+ a.d[w] = byte(dig + '0')
+ w++
+ n = n*10 + c - '0'
+ }
+
+ // Put down extra digits.
+ for n > 0 {
+ dig := n >> k
+ n -= dig << k
+ if w < len(a.d) {
+ a.d[w] = byte(dig + '0')
+ w++
+ } else if dig > 0 {
+ a.trunc = true
+ }
+ n = n * 10
+ }
+
+ a.nd = w
+ trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
+
+type leftCheat struct {
+ delta int // number of new digits
+ cutoff string // minus one digit if original < a.
+}
+
+var leftcheats = []leftCheat{
+ // Leading digits of 1/2^i = 5^i.
+ // 5^23 is not an exact 64-bit floating point number,
+ // so have to use bc for the math.
+ /*
+ seq 27 | sed 's/^/5^/' | bc |
+ awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," }
+ {
+ log2 = log(2)/log(10)
+ printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n",
+ int(log2*NR+1), $0, 2**NR)
+ }'
+ */
+ {0, ""},
+ {1, "5"}, // * 2
+ {1, "25"}, // * 4
+ {1, "125"}, // * 8
+ {2, "625"}, // * 16
+ {2, "3125"}, // * 32
+ {2, "15625"}, // * 64
+ {3, "78125"}, // * 128
+ {3, "390625"}, // * 256
+ {3, "1953125"}, // * 512
+ {4, "9765625"}, // * 1024
+ {4, "48828125"}, // * 2048
+ {4, "244140625"}, // * 4096
+ {4, "1220703125"}, // * 8192
+ {5, "6103515625"}, // * 16384
+ {5, "30517578125"}, // * 32768
+ {5, "152587890625"}, // * 65536
+ {6, "762939453125"}, // * 131072
+ {6, "3814697265625"}, // * 262144
+ {6, "19073486328125"}, // * 524288
+ {7, "95367431640625"}, // * 1048576
+ {7, "476837158203125"}, // * 2097152
+ {7, "2384185791015625"}, // * 4194304
+ {7, "11920928955078125"}, // * 8388608
+ {8, "59604644775390625"}, // * 16777216
+ {8, "298023223876953125"}, // * 33554432
+ {8, "1490116119384765625"}, // * 67108864
+ {9, "7450580596923828125"}, // * 134217728
+}
+
+// Is the leading prefix of b lexicographically less than s?
+func prefixIsLessThan(b []byte, s string) bool {
+ for i := 0; i < len(s); i++ {
+ if i >= len(b) {
+ return true
+ }
+ if b[i] != s[i] {
+ return b[i] < s[i]
+ }
+ }
+ return false
+}
+
+// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow.
+func leftShift(a *decimal, k uint) {
+ delta := leftcheats[k].delta
+ if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
+ delta--
+ }
+
+ r := a.nd // read index
+ w := a.nd + delta // write index
+ n := 0
+
+ // Pick up a digit, put down a digit.
+ for r--; r >= 0; r-- {
+ n += (int(a.d[r]) - '0') << k
+ quo := n / 10
+ rem := n - 10*quo
+ w--
+ if w < len(a.d) {
+ a.d[w] = byte(rem + '0')
+ } else if rem != 0 {
+ a.trunc = true
+ }
+ n = quo
+ }
+
+ // Put down extra digits.
+ for n > 0 {
+ quo := n / 10
+ rem := n - 10*quo
+ w--
+ if w < len(a.d) {
+ a.d[w] = byte(rem + '0')
+ } else if rem != 0 {
+ a.trunc = true
+ }
+ n = quo
+ }
+
+ a.nd += delta
+ if a.nd >= len(a.d) {
+ a.nd = len(a.d)
+ }
+ a.dp += delta
+ trim(a)
+}
+
+// Binary shift left (k > 0) or right (k < 0).
+func (a *decimal) Shift(k int) {
+ switch {
+ case a.nd == 0:
+ // nothing to do: a == 0
+ case k > 0:
+ for k > maxShift {
+ leftShift(a, maxShift)
+ k -= maxShift
+ }
+ leftShift(a, uint(k))
+ case k < 0:
+ for k < -maxShift {
+ rightShift(a, maxShift)
+ k += maxShift
+ }
+ rightShift(a, uint(-k))
+ }
+}
+
+// If we chop a at nd digits, should we round up?
+func shouldRoundUp(a *decimal, nd int) bool {
+ if nd < 0 || nd >= a.nd {
+ return false
+ }
+ if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even
+ // if we truncated, a little higher than what's recorded - always round up
+ if a.trunc {
+ return true
+ }
+ return nd > 0 && (a.d[nd-1]-'0')%2 != 0
+ }
+ // not halfway - digit tells all
+ return a.d[nd] >= '5'
+}
+
+// Round a to nd digits (or fewer).
+// If nd is zero, it means we're rounding
+// just to the left of the digits, as in
+// 0.09 -> 0.1.
+func (a *decimal) Round(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+ if shouldRoundUp(a, nd) {
+ a.RoundUp(nd)
+ } else {
+ a.RoundDown(nd)
+ }
+}
+
+// Round a down to nd digits (or fewer).
+func (a *decimal) RoundDown(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+ a.nd = nd
+ trim(a)
+}
+
+// Round a up to nd digits (or fewer).
+func (a *decimal) RoundUp(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+
+ // round up
+ for i := nd - 1; i >= 0; i-- {
+ c := a.d[i]
+ if c < '9' { // can stop after this digit
+ a.d[i]++
+ a.nd = i + 1
+ return
+ }
+ }
+
+ // Number is all 9s.
+ // Change to single 1 with adjusted decimal point.
+ a.d[0] = '1'
+ a.nd = 1
+ a.dp++
+}
+
+// Extract integer part, rounded appropriately.
+// No guarantees about overflow.
+func (a *decimal) RoundedInteger() uint64 {
+ if a.dp > 20 {
+ return 0xFFFFFFFFFFFFFFFF
+ }
+ var i int
+ n := uint64(0)
+ for i = 0; i < a.dp && i < a.nd; i++ {
+ n = n*10 + uint64(a.d[i]-'0')
+ }
+ for ; i < a.dp; i++ {
+ n *= 10
+ }
+ if shouldRoundUp(a, a.dp) {
+ n++
+ }
+ return n
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go b/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go
new file mode 100644
index 000000000..508ddc6be
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go
@@ -0,0 +1,668 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+// An extFloat represents an extended floating-point number, with more
+// precision than a float64. It does not try to save bits: the
+// number represented by the structure is mant*(2^exp), with a negative
+// sign if neg is true.
+type extFloat struct {
+ mant uint64
+ exp int
+ neg bool
+}
+
+// Powers of ten taken from double-conversion library.
+// http://code.google.com/p/double-conversion/
+const (
+ firstPowerOfTen = -348
+ stepPowerOfTen = 8
+)
+
+var smallPowersOfTen = [...]extFloat{
+ {1 << 63, -63, false}, // 1
+ {0xa << 60, -60, false}, // 1e1
+ {0x64 << 57, -57, false}, // 1e2
+ {0x3e8 << 54, -54, false}, // 1e3
+ {0x2710 << 50, -50, false}, // 1e4
+ {0x186a0 << 47, -47, false}, // 1e5
+ {0xf4240 << 44, -44, false}, // 1e6
+ {0x989680 << 40, -40, false}, // 1e7
+}
+
+var powersOfTen = [...]extFloat{
+ {0xfa8fd5a0081c0288, -1220, false}, // 10^-348
+ {0xbaaee17fa23ebf76, -1193, false}, // 10^-340
+ {0x8b16fb203055ac76, -1166, false}, // 10^-332
+ {0xcf42894a5dce35ea, -1140, false}, // 10^-324
+ {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316
+ {0xe61acf033d1a45df, -1087, false}, // 10^-308
+ {0xab70fe17c79ac6ca, -1060, false}, // 10^-300
+ {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292
+ {0xbe5691ef416bd60c, -1007, false}, // 10^-284
+ {0x8dd01fad907ffc3c, -980, false}, // 10^-276
+ {0xd3515c2831559a83, -954, false}, // 10^-268
+ {0x9d71ac8fada6c9b5, -927, false}, // 10^-260
+ {0xea9c227723ee8bcb, -901, false}, // 10^-252
+ {0xaecc49914078536d, -874, false}, // 10^-244
+ {0x823c12795db6ce57, -847, false}, // 10^-236
+ {0xc21094364dfb5637, -821, false}, // 10^-228
+ {0x9096ea6f3848984f, -794, false}, // 10^-220
+ {0xd77485cb25823ac7, -768, false}, // 10^-212
+ {0xa086cfcd97bf97f4, -741, false}, // 10^-204
+ {0xef340a98172aace5, -715, false}, // 10^-196
+ {0xb23867fb2a35b28e, -688, false}, // 10^-188
+ {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180
+ {0xc5dd44271ad3cdba, -635, false}, // 10^-172
+ {0x936b9fcebb25c996, -608, false}, // 10^-164
+ {0xdbac6c247d62a584, -582, false}, // 10^-156
+ {0xa3ab66580d5fdaf6, -555, false}, // 10^-148
+ {0xf3e2f893dec3f126, -529, false}, // 10^-140
+ {0xb5b5ada8aaff80b8, -502, false}, // 10^-132
+ {0x87625f056c7c4a8b, -475, false}, // 10^-124
+ {0xc9bcff6034c13053, -449, false}, // 10^-116
+ {0x964e858c91ba2655, -422, false}, // 10^-108
+ {0xdff9772470297ebd, -396, false}, // 10^-100
+ {0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92
+ {0xf8a95fcf88747d94, -343, false}, // 10^-84
+ {0xb94470938fa89bcf, -316, false}, // 10^-76
+ {0x8a08f0f8bf0f156b, -289, false}, // 10^-68
+ {0xcdb02555653131b6, -263, false}, // 10^-60
+ {0x993fe2c6d07b7fac, -236, false}, // 10^-52
+ {0xe45c10c42a2b3b06, -210, false}, // 10^-44
+ {0xaa242499697392d3, -183, false}, // 10^-36
+ {0xfd87b5f28300ca0e, -157, false}, // 10^-28
+ {0xbce5086492111aeb, -130, false}, // 10^-20
+ {0x8cbccc096f5088cc, -103, false}, // 10^-12
+ {0xd1b71758e219652c, -77, false}, // 10^-4
+ {0x9c40000000000000, -50, false}, // 10^4
+ {0xe8d4a51000000000, -24, false}, // 10^12
+ {0xad78ebc5ac620000, 3, false}, // 10^20
+ {0x813f3978f8940984, 30, false}, // 10^28
+ {0xc097ce7bc90715b3, 56, false}, // 10^36
+ {0x8f7e32ce7bea5c70, 83, false}, // 10^44
+ {0xd5d238a4abe98068, 109, false}, // 10^52
+ {0x9f4f2726179a2245, 136, false}, // 10^60
+ {0xed63a231d4c4fb27, 162, false}, // 10^68
+ {0xb0de65388cc8ada8, 189, false}, // 10^76
+ {0x83c7088e1aab65db, 216, false}, // 10^84
+ {0xc45d1df942711d9a, 242, false}, // 10^92
+ {0x924d692ca61be758, 269, false}, // 10^100
+ {0xda01ee641a708dea, 295, false}, // 10^108
+ {0xa26da3999aef774a, 322, false}, // 10^116
+ {0xf209787bb47d6b85, 348, false}, // 10^124
+ {0xb454e4a179dd1877, 375, false}, // 10^132
+ {0x865b86925b9bc5c2, 402, false}, // 10^140
+ {0xc83553c5c8965d3d, 428, false}, // 10^148
+ {0x952ab45cfa97a0b3, 455, false}, // 10^156
+ {0xde469fbd99a05fe3, 481, false}, // 10^164
+ {0xa59bc234db398c25, 508, false}, // 10^172
+ {0xf6c69a72a3989f5c, 534, false}, // 10^180
+ {0xb7dcbf5354e9bece, 561, false}, // 10^188
+ {0x88fcf317f22241e2, 588, false}, // 10^196
+ {0xcc20ce9bd35c78a5, 614, false}, // 10^204
+ {0x98165af37b2153df, 641, false}, // 10^212
+ {0xe2a0b5dc971f303a, 667, false}, // 10^220
+ {0xa8d9d1535ce3b396, 694, false}, // 10^228
+ {0xfb9b7cd9a4a7443c, 720, false}, // 10^236
+ {0xbb764c4ca7a44410, 747, false}, // 10^244
+ {0x8bab8eefb6409c1a, 774, false}, // 10^252
+ {0xd01fef10a657842c, 800, false}, // 10^260
+ {0x9b10a4e5e9913129, 827, false}, // 10^268
+ {0xe7109bfba19c0c9d, 853, false}, // 10^276
+ {0xac2820d9623bf429, 880, false}, // 10^284
+ {0x80444b5e7aa7cf85, 907, false}, // 10^292
+ {0xbf21e44003acdd2d, 933, false}, // 10^300
+ {0x8e679c2f5e44ff8f, 960, false}, // 10^308
+ {0xd433179d9c8cb841, 986, false}, // 10^316
+ {0x9e19db92b4e31ba9, 1013, false}, // 10^324
+ {0xeb96bf6ebadf77d9, 1039, false}, // 10^332
+ {0xaf87023b9bf0ee6b, 1066, false}, // 10^340
+}
+
+// floatBits returns the bits of the float64 that best approximates
+// the extFloat passed as receiver. Overflow is set to true if
+// the resulting float64 is ±Inf.
+func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) {
+ f.Normalize()
+
+ exp := f.exp + 63
+
+ // Exponent too small.
+ if exp < flt.bias+1 {
+ n := flt.bias + 1 - exp
+ f.mant >>= uint(n)
+ exp += n
+ }
+
+ // Extract 1+flt.mantbits bits from the 64-bit mantissa.
+ mant := f.mant >> (63 - flt.mantbits)
+ if f.mant&(1<<(62-flt.mantbits)) != 0 {
+ // Round up.
+ mant += 1
+ }
+
+ // Rounding might have added a bit; shift down.
+ if mant == 2<<flt.mantbits {
+ mant >>= 1
+ exp++
+ }
+
+ // Infinities.
+ if exp-flt.bias >= 1<<flt.expbits-1 {
+ // ±Inf
+ mant = 0
+ exp = 1<<flt.expbits - 1 + flt.bias
+ overflow = true
+ } else if mant&(1<<flt.mantbits) == 0 {
+ // Denormalized?
+ exp = flt.bias
+ }
+ // Assemble bits.
+ bits = mant & (uint64(1)<<flt.mantbits - 1)
+ bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
+ if f.neg {
+ bits |= 1 << (flt.mantbits + flt.expbits)
+ }
+ return
+}
+
+// AssignComputeBounds sets f to the floating point value
+// defined by mant, exp and precision given by flt. It returns
+// lower, upper such that any number in the closed interval
+// [lower, upper] is converted back to the same floating point number.
+func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) {
+ f.mant = mant
+ f.exp = exp - int(flt.mantbits)
+ f.neg = neg
+ if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) {
+ // An exact integer
+ f.mant >>= uint(-f.exp)
+ f.exp = 0
+ return *f, *f
+ }
+ expBiased := exp - flt.bias
+
+ upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg}
+ if mant != 1<<flt.mantbits || expBiased == 1 {
+ lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg}
+ } else {
+ lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg}
+ }
+ return
+}
+
+// Normalize normalizes f so that the highest bit of the mantissa is
+// set, and returns the number by which the mantissa was left-shifted.
+func (f *extFloat) Normalize() (shift uint) {
+ mant, exp := f.mant, f.exp
+ if mant == 0 {
+ return 0
+ }
+ if mant>>(64-32) == 0 {
+ mant <<= 32
+ exp -= 32
+ }
+ if mant>>(64-16) == 0 {
+ mant <<= 16
+ exp -= 16
+ }
+ if mant>>(64-8) == 0 {
+ mant <<= 8
+ exp -= 8
+ }
+ if mant>>(64-4) == 0 {
+ mant <<= 4
+ exp -= 4
+ }
+ if mant>>(64-2) == 0 {
+ mant <<= 2
+ exp -= 2
+ }
+ if mant>>(64-1) == 0 {
+ mant <<= 1
+ exp -= 1
+ }
+ shift = uint(f.exp - exp)
+ f.mant, f.exp = mant, exp
+ return
+}
+
+// Multiply sets f to the product f*g: the result is correctly rounded,
+// but not normalized.
+func (f *extFloat) Multiply(g extFloat) {
+ fhi, flo := f.mant>>32, uint64(uint32(f.mant))
+ ghi, glo := g.mant>>32, uint64(uint32(g.mant))
+
+ // Cross products.
+ cross1 := fhi * glo
+ cross2 := flo * ghi
+
+ // f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo
+ f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32)
+ rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32)
+ // Round up.
+ rem += (1 << 31)
+
+ f.mant += (rem >> 32)
+ f.exp = f.exp + g.exp + 64
+}
+
+var uint64pow10 = [...]uint64{
+ 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+}
+
+// AssignDecimal sets f to an approximate value mantissa*10^exp. It
+// returns true if the value represented by f is guaranteed to be the
+// best approximation of d after being rounded to a float64 or
+// float32 depending on flt.
+func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) {
+ const uint64digits = 19
+ const errorscale = 8
+ errors := 0 // An upper bound for error, computed in errorscale*ulp.
+ if trunc {
+ // the decimal number was truncated.
+ errors += errorscale / 2
+ }
+
+ f.mant = mantissa
+ f.exp = 0
+ f.neg = neg
+
+ // Multiply by powers of ten.
+ i := (exp10 - firstPowerOfTen) / stepPowerOfTen
+ if exp10 < firstPowerOfTen || i >= len(powersOfTen) {
+ return false
+ }
+ adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen
+
+ // We multiply by exp%step
+ if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] {
+ // We can multiply the mantissa exactly.
+ f.mant *= uint64pow10[adjExp]
+ f.Normalize()
+ } else {
+ f.Normalize()
+ f.Multiply(smallPowersOfTen[adjExp])
+ errors += errorscale / 2
+ }
+
+ // We multiply by 10 to the exp - exp%step.
+ f.Multiply(powersOfTen[i])
+ if errors > 0 {
+ errors += 1
+ }
+ errors += errorscale / 2
+
+ // Normalize
+ shift := f.Normalize()
+ errors <<= shift
+
+ // Now f is a good approximation of the decimal.
+ // Check whether the error is too large: that is, if the mantissa
+	// is perturbed by the error, the resulting float64 will change.
+ // The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits.
+ //
+ // In many cases the approximation will be good enough.
+ denormalExp := flt.bias - 63
+ var extrabits uint
+ if f.exp <= denormalExp {
+ // f.mant * 2^f.exp is smaller than 2^(flt.bias+1).
+ extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp))
+ } else {
+ extrabits = uint(63 - flt.mantbits)
+ }
+
+ halfway := uint64(1) << (extrabits - 1)
+ mant_extra := f.mant & (1<<extrabits - 1)
+
+ // Do a signed comparison here! If the error estimate could make
+ // the mantissa round differently for the conversion to double,
+ // then we can't give a definite answer.
+ if int64(halfway)-int64(errors) < int64(mant_extra) &&
+ int64(mant_extra) < int64(halfway)+int64(errors) {
+ return false
+ }
+ return true
+}
+
+// Frexp10 is an analogue of math.Frexp for decimal powers. It scales
+// f by an approximate power of ten 10^-exp, and returns exp10, so
+// that f*10^exp10 has the same value as the old f, up to an ulp,
+// as well as the index of 10^-exp in the powersOfTen table.
+func (f *extFloat) frexp10() (exp10, index int) {
+ // The constants expMin and expMax constrain the final value of the
+ // binary exponent of f. We want a small integral part in the result
+ // because finding digits of an integer requires divisions, whereas
+ // digits of the fractional part can be found by repeatedly multiplying
+ // by 10.
+ const expMin = -60
+ const expMax = -32
+ // Find power of ten such that x * 10^n has a binary exponent
+ // between expMin and expMax.
+ approxExp10 := ((expMin+expMax)/2 - f.exp) * 28 / 93 // log(10)/log(2) is close to 93/28.
+ i := (approxExp10 - firstPowerOfTen) / stepPowerOfTen
+Loop:
+ for {
+ exp := f.exp + powersOfTen[i].exp + 64
+ switch {
+ case exp < expMin:
+ i++
+ case exp > expMax:
+ i--
+ default:
+ break Loop
+ }
+ }
+ // Apply the desired decimal shift on f. It will have exponent
+ // in the desired range. This is multiplication by 10^-exp10.
+ f.Multiply(powersOfTen[i])
+
+ return -(firstPowerOfTen + i*stepPowerOfTen), i
+}
+
+// frexp10Many applies a common shift by a power of ten to a, b, c.
+func frexp10Many(a, b, c *extFloat) (exp10 int) {
+ exp10, i := c.frexp10()
+ a.Multiply(powersOfTen[i])
+ b.Multiply(powersOfTen[i])
+ return
+}
+
+// FixedDecimal stores in d the first n significant digits
+// of the decimal representation of f. It returns false
+// if it cannot be sure of the answer.
+func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool {
+ if f.mant == 0 {
+ d.nd = 0
+ d.dp = 0
+ d.neg = f.neg
+ return true
+ }
+ if n == 0 {
+ panic("strconv: internal error: extFloat.FixedDecimal called with n == 0")
+ }
+ // Multiply by an appropriate power of ten to have a reasonable
+ // number to process.
+ f.Normalize()
+ exp10, _ := f.frexp10()
+
+ shift := uint(-f.exp)
+ integer := uint32(f.mant >> shift)
+ fraction := f.mant - (uint64(integer) << shift)
+ ε := uint64(1) // ε is the uncertainty we have on the mantissa of f.
+
+ // Write exactly n digits to d.
+ needed := n // how many digits are left to write.
+ integerDigits := 0 // the number of decimal digits of integer.
+ pow10 := uint64(1) // the power of ten by which f was scaled.
+ for i, pow := 0, uint64(1); i < 20; i++ {
+ if pow > uint64(integer) {
+ integerDigits = i
+ break
+ }
+ pow *= 10
+ }
+ rest := integer
+ if integerDigits > needed {
+ // the integral part is already large, trim the last digits.
+ pow10 = uint64pow10[integerDigits-needed]
+ integer /= uint32(pow10)
+ rest -= integer * uint32(pow10)
+ } else {
+ rest = 0
+ }
+
+ // Write the digits of integer: the digits of rest are omitted.
+ var buf [32]byte
+ pos := len(buf)
+ for v := integer; v > 0; {
+ v1 := v / 10
+ v -= 10 * v1
+ pos--
+ buf[pos] = byte(v + '0')
+ v = v1
+ }
+ for i := pos; i < len(buf); i++ {
+ d.d[i-pos] = buf[i]
+ }
+ nd := len(buf) - pos
+ d.nd = nd
+ d.dp = integerDigits + exp10
+ needed -= nd
+
+ if needed > 0 {
+ if rest != 0 || pow10 != 1 {
+ panic("strconv: internal error, rest != 0 but needed > 0")
+ }
+ // Emit digits for the fractional part. Each time, 10*fraction
+ // fits in a uint64 without overflow.
+ for needed > 0 {
+ fraction *= 10
+ ε *= 10 // the uncertainty scales as we multiply by ten.
+ if 2*ε > 1<<shift {
+ // the error is so large it could modify which digit to write, abort.
+ return false
+ }
+ digit := fraction >> shift
+ d.d[nd] = byte(digit + '0')
+ fraction -= digit << shift
+ nd++
+ needed--
+ }
+ d.nd = nd
+ }
+
+ // We have written a truncation of f (a numerator / 10^d.dp). The remaining part
+ // can be interpreted as a small number (< 1) to be added to the last digit of the
+ // numerator.
+ //
+ // If rest > 0, the amount is:
+ // (rest<<shift | fraction) / (pow10 << shift)
+ // fraction being known with a ±ε uncertainty.
+ // The fact that n > 0 guarantees that pow10 << shift does not overflow a uint64.
+ //
+ // If rest = 0, pow10 == 1 and the amount is
+ // fraction / (1 << shift)
+ // fraction being known with a ±ε uncertainty.
+ //
+ // We pass this information to the rounding routine for adjustment.
+
+ ok := adjustLastDigitFixed(d, uint64(rest)<<shift|fraction, pow10, shift, ε)
+ if !ok {
+ return false
+ }
+ // Trim trailing zeros.
+ for i := d.nd - 1; i >= 0; i-- {
+ if d.d[i] != '0' {
+ d.nd = i + 1
+ break
+ }
+ }
+ return true
+}
+
+// adjustLastDigitFixed assumes d contains the representation of the integral part
+// of some number, whose fractional part is num / (den << shift). The numerator
+// num is only known up to an uncertainty of size ε, assumed to be less than
+// (den << shift)/2.
+//
+// It will increase the last digit by one to account for correct rounding, typically
+// when the fractional part is greater than 1/2, and will return false if ε is such
+// that no correct answer can be given.
+func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool {
+ if num > den<<shift {
+ panic("strconv: num > den<<shift in adjustLastDigitFixed")
+ }
+ if 2*ε > den<<shift {
+ panic("strconv: ε > (den<<shift)/2")
+ }
+ if 2*(num+ε) < den<<shift {
+ return true
+ }
+ if 2*(num-ε) > den<<shift {
+ // increment d by 1.
+ i := d.nd - 1
+ for ; i >= 0; i-- {
+ if d.d[i] == '9' {
+ d.nd--
+ } else {
+ break
+ }
+ }
+ if i < 0 {
+ d.d[0] = '1'
+ d.nd = 1
+ d.dp++
+ } else {
+ d.d[i]++
+ }
+ return true
+ }
+ return false
+}
+
+// ShortestDecimal stores in d the shortest decimal representation of f
+// which belongs to the open interval (lower, upper), where f is supposed
+// to lie. It returns false whenever the result is unsure. The implementation
+// uses the Grisu3 algorithm.
+func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool {
+ if f.mant == 0 {
+ d.nd = 0
+ d.dp = 0
+ d.neg = f.neg
+ return true
+ }
+ if f.exp == 0 && *lower == *f && *lower == *upper {
+ // an exact integer.
+ var buf [24]byte
+ n := len(buf) - 1
+ for v := f.mant; v > 0; {
+ v1 := v / 10
+ v -= 10 * v1
+ buf[n] = byte(v + '0')
+ n--
+ v = v1
+ }
+ nd := len(buf) - n - 1
+ for i := 0; i < nd; i++ {
+ d.d[i] = buf[n+1+i]
+ }
+ d.nd, d.dp = nd, nd
+ for d.nd > 0 && d.d[d.nd-1] == '0' {
+ d.nd--
+ }
+ if d.nd == 0 {
+ d.dp = 0
+ }
+ d.neg = f.neg
+ return true
+ }
+ upper.Normalize()
+ // Uniformize exponents.
+ if f.exp > upper.exp {
+ f.mant <<= uint(f.exp - upper.exp)
+ f.exp = upper.exp
+ }
+ if lower.exp > upper.exp {
+ lower.mant <<= uint(lower.exp - upper.exp)
+ lower.exp = upper.exp
+ }
+
+ exp10 := frexp10Many(lower, f, upper)
+ // Take a safety margin due to rounding in frexp10Many, but we lose precision.
+ upper.mant++
+ lower.mant--
+
+ // The shortest representation of f is either rounded up or down, but
+ // in any case, it is a truncation of upper.
+ shift := uint(-upper.exp)
+ integer := uint32(upper.mant >> shift)
+ fraction := upper.mant - (uint64(integer) << shift)
+
+ // How far we can go down from upper until the result is wrong.
+ allowance := upper.mant - lower.mant
+ // How far we should go to get a very precise result.
+ targetDiff := upper.mant - f.mant
+
+ // Count integral digits: there are at most 10.
+ var integerDigits int
+ for i, pow := 0, uint64(1); i < 20; i++ {
+ if pow > uint64(integer) {
+ integerDigits = i
+ break
+ }
+ pow *= 10
+ }
+ for i := 0; i < integerDigits; i++ {
+ pow := uint64pow10[integerDigits-i-1]
+ digit := integer / uint32(pow)
+ d.d[i] = byte(digit + '0')
+ integer -= digit * uint32(pow)
+ // evaluate whether we should stop.
+ if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
+ d.nd = i + 1
+ d.dp = integerDigits + exp10
+ d.neg = f.neg
+ // Sometimes allowance is so large the last digit might need to be
+ // decremented to get closer to f.
+ return adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2)
+ }
+ }
+ d.nd = integerDigits
+ d.dp = d.nd + exp10
+ d.neg = f.neg
+
+ // Compute digits of the fractional part. At each step fraction does not
+ // overflow. The choice of minExp implies that fraction is less than 2^60.
+ var digit int
+ multiplier := uint64(1)
+ for {
+ fraction *= 10
+ multiplier *= 10
+ digit = int(fraction >> shift)
+ d.d[d.nd] = byte(digit + '0')
+ d.nd++
+ fraction -= uint64(digit) << shift
+ if fraction < allowance*multiplier {
+ // We are in the admissible range. Note that if allowance is about to
+ // overflow, that is, allowance > 2^64/10, the condition is automatically
+ // true due to the limited range of fraction.
+ return adjustLastDigit(d,
+ fraction, targetDiff*multiplier, allowance*multiplier,
+ 1<<shift, multiplier*2)
+ }
+ }
+}
+
+// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
+// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
+// It assumes that a decimal digit is worth ulpDecimal*ε, and that
+// all data is known with a error estimate of ulpBinary*ε.
+func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
+ if ulpDecimal < 2*ulpBinary {
+ // Approximation is too wide.
+ return false
+ }
+ for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
+ d.d[d.nd-1]--
+ currentDiff += ulpDecimal
+ }
+ if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
+ // we have two choices, and don't know what to do.
+ return false
+ }
+ if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
+ // we went too far
+ return false
+ }
+ if d.nd == 1 && d.d[0] == '0' {
+ // the number has actually reached zero.
+ d.nd = 0
+ d.dp = 0
+ }
+ return true
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go b/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go
new file mode 100644
index 000000000..4d33e6f77
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go
@@ -0,0 +1,121 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are from Go stdlib's encoding/json/fold.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+import (
+ "unicode/utf8"
+)
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// EqualFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func EqualFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// AsciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func AsciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// SimpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func SimpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go b/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go
new file mode 100644
index 000000000..360d6dbcf
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go
@@ -0,0 +1,542 @@
+package v1
+
+/**
+ * Copyright 2015 Paul Querna, Klaus Post
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Most of this file is from Go stdlib's strconv/ftoa.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+import "math"
+
+// TODO: move elsewhere?
+type floatInfo struct {
+ mantbits uint
+ expbits uint
+ bias int
+}
+
+var optimize = true // can change for testing
+
+var float32info = floatInfo{23, 8, -127}
+var float64info = floatInfo{52, 11, -1023}
+
+// AppendFloat appends the string form of the floating-point number val,
+// as generated by strconv.FormatFloat.
+func AppendFloat(dst EncodingBuffer, val float64, fmt byte, prec, bitSize int) {
+ var bits uint64
+ var flt *floatInfo
+ switch bitSize {
+ case 32:
+ bits = uint64(math.Float32bits(float32(val)))
+ flt = &float32info
+ case 64:
+ bits = math.Float64bits(val)
+ flt = &float64info
+ default:
+ panic("strconv: illegal AppendFloat/FormatFloat bitSize")
+ }
+
+ neg := bits>>(flt.expbits+flt.mantbits) != 0
+ exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
+ mant := bits & (uint64(1)<<flt.mantbits - 1)
+
+ switch exp {
+ case 1<<flt.expbits - 1:
+ // Inf, NaN
+ var s string
+ switch {
+ case mant != 0:
+ s = "NaN"
+ case neg:
+ s = "-Inf"
+ default:
+ s = "+Inf"
+ }
+ dst.WriteString(s)
+ return
+
+ case 0:
+ // denormalized
+ exp++
+
+ default:
+ // add implicit top bit
+ mant |= uint64(1) << flt.mantbits
+ }
+ exp += flt.bias
+
+ // Pick off easy binary format.
+ if fmt == 'b' {
+ fmtB(dst, neg, mant, exp, flt)
+ return
+ }
+
+ if !optimize {
+ bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ return
+ }
+
+ var digs decimalSlice
+ ok := false
+ // Negative precision means "only as much as needed to be exact."
+ shortest := prec < 0
+ if shortest {
+ // Try Grisu3 algorithm.
+ f := new(extFloat)
+ lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
+ var buf [32]byte
+ digs.d = buf[:]
+ ok = f.ShortestDecimal(&digs, &lower, &upper)
+ if !ok {
+ bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ return
+ }
+ // Precision for shortest representation mode.
+ switch fmt {
+ case 'e', 'E':
+ prec = max(digs.nd-1, 0)
+ case 'f':
+ prec = max(digs.nd-digs.dp, 0)
+ case 'g', 'G':
+ prec = digs.nd
+ }
+ } else if fmt != 'f' {
+ // Fixed number of digits.
+ digits := prec
+ switch fmt {
+ case 'e', 'E':
+ digits++
+ case 'g', 'G':
+ if prec == 0 {
+ prec = 1
+ }
+ digits = prec
+ }
+ if digits <= 15 {
+ // try fast algorithm when the number of digits is reasonable.
+ var buf [24]byte
+ digs.d = buf[:]
+ f := extFloat{mant, exp - int(flt.mantbits), neg}
+ ok = f.FixedDecimal(&digs, digits)
+ }
+ }
+ if !ok {
+ bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ return
+ }
+ formatDigits(dst, shortest, neg, digs, prec, fmt)
+ return
+}
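+
+// Usage sketch (illustrative, not upstream code; assumes an EncodingBuffer
+// implementation such as this package's Buffer type):
+//
+//   var buf Buffer
+//   AppendFloat(&buf, 3.25, 'g', -1, 64) // writes "3.25"
+//   AppendFloat(&buf, 1e21, 'g', -1, 64) // writes "1e+21"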
+
+// bigFtoa uses multiprecision computations to format a float.
+func bigFtoa(dst EncodingBuffer, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) {
+ d := new(decimal)
+ d.Assign(mant)
+ d.Shift(exp - int(flt.mantbits))
+ var digs decimalSlice
+ shortest := prec < 0
+ if shortest {
+ roundShortest(d, mant, exp, flt)
+ digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+ // Precision for shortest representation mode.
+ switch fmt {
+ case 'e', 'E':
+ prec = digs.nd - 1
+ case 'f':
+ prec = max(digs.nd-digs.dp, 0)
+ case 'g', 'G':
+ prec = digs.nd
+ }
+ } else {
+ // Round appropriately.
+ switch fmt {
+ case 'e', 'E':
+ d.Round(prec + 1)
+ case 'f':
+ d.Round(d.dp + prec)
+ case 'g', 'G':
+ if prec == 0 {
+ prec = 1
+ }
+ d.Round(prec)
+ }
+ digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+ }
+ formatDigits(dst, shortest, neg, digs, prec, fmt)
+ return
+}
+
+func formatDigits(dst EncodingBuffer, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) {
+ switch fmt {
+ case 'e', 'E':
+ fmtE(dst, neg, digs, prec, fmt)
+ return
+ case 'f':
+ fmtF(dst, neg, digs, prec)
+ return
+ case 'g', 'G':
+ // trailing fractional zeros in 'e' form will be trimmed.
+ eprec := prec
+ if eprec > digs.nd && digs.nd >= digs.dp {
+ eprec = digs.nd
+ }
+ // %e is used if the exponent from the conversion
+ // is less than -4 or greater than or equal to the precision.
+ // if precision was the shortest possible, use precision 6 for this decision.
+ if shortest {
+ eprec = 6
+ }
+ exp := digs.dp - 1
+ if exp < -4 || exp >= eprec {
+ if prec > digs.nd {
+ prec = digs.nd
+ }
+ fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
+ return
+ }
+ if prec > digs.dp {
+ prec = digs.nd
+ }
+ fmtF(dst, neg, digs, max(prec-digs.dp, 0))
+ return
+ }
+
+ // unknown format
+ dst.Write([]byte{'%', fmt})
+ return
+}
+
+// Round d (= mant * 2^exp) to the shortest number of digits
+// that will let the original floating point value be precisely
+// reconstructed. flt describes the original floating point type (float32 or float64).
+func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
+ // If mantissa is zero, the number is zero; stop now.
+ if mant == 0 {
+ d.nd = 0
+ return
+ }
+
+ // Compute upper and lower such that any decimal number
+ // between upper and lower (possibly inclusive)
+ // will round to the original floating point number.
+
+ // We may see at once that the number is already shortest.
+ //
+ // Suppose d is not denormal, so that 2^exp <= d < 10^dp.
+ // The closest shorter number is at least 10^(dp-nd) away.
+ // The lower/upper bounds computed below are at distance
+ // at most 2^(exp-mantbits).
+ //
+ // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
+ // or equivalently log2(10)*(dp-nd) > exp-mantbits.
+ // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
+ minexp := flt.bias + 1 // minimum possible exponent
+ if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
+ // The number is already shortest.
+ return
+ }
+
+ // d = mant << (exp - mantbits)
+ // Next highest floating point number is mant+1 << exp-mantbits.
+ // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
+ upper := new(decimal)
+ upper.Assign(mant*2 + 1)
+ upper.Shift(exp - int(flt.mantbits) - 1)
+
+ // d = mant << (exp - mantbits)
+ // Next lowest floating point number is mant-1 << exp-mantbits,
+ // unless mant-1 drops the significant bit and exp is not the minimum exp,
+ // in which case the next lowest is mant*2-1 << exp-mantbits-1.
+ // Either way, call it mantlo << explo-mantbits.
+ // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+ var mantlo uint64
+ var explo int
+ if mant > 1<<flt.mantbits || exp == minexp {
+ mantlo = mant - 1
+ explo = exp
+ } else {
+ mantlo = mant*2 - 1
+ explo = exp - 1
+ }
+ lower := new(decimal)
+ lower.Assign(mantlo*2 + 1)
+ lower.Shift(explo - int(flt.mantbits) - 1)
+
+ // The upper and lower bounds are possible outputs only if
+ // the original mantissa is even, so that IEEE round-to-even
+ // would round to the original mantissa and not the neighbors.
+ inclusive := mant%2 == 0
+
+ // Now we can figure out the minimum number of digits required.
+ // Walk along until d has distinguished itself from upper and lower.
+ for i := 0; i < d.nd; i++ {
+ var l, m, u byte // lower, middle, upper digits
+ if i < lower.nd {
+ l = lower.d[i]
+ } else {
+ l = '0'
+ }
+ m = d.d[i]
+ if i < upper.nd {
+ u = upper.d[i]
+ } else {
+ u = '0'
+ }
+
+ // Okay to round down (truncate) if lower has a different digit
+ // or if lower is inclusive and is exactly the result of rounding down.
+ okdown := l != m || (inclusive && l == m && i+1 == lower.nd)
+
+ // Okay to round up if upper has a different digit and
+ // either upper is inclusive or upper is bigger than the result of rounding up.
+ okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
+
+ // If it's okay to do either, then round to the nearest one.
+ // If it's okay to do only one, do it.
+ switch {
+ case okdown && okup:
+ d.Round(i + 1)
+ return
+ case okdown:
+ d.RoundDown(i + 1)
+ return
+ case okup:
+ d.RoundUp(i + 1)
+ return
+ }
+ }
+}
+
+type decimalSlice struct {
+ d []byte
+ nd, dp int
+ neg bool
+}
+
+// %e: -d.ddddde±dd
+func fmtE(dst EncodingBuffer, neg bool, d decimalSlice, prec int, fmt byte) {
+ // sign
+ if neg {
+ dst.WriteByte('-')
+ }
+
+ // first digit
+ ch := byte('0')
+ if d.nd != 0 {
+ ch = d.d[0]
+ }
+ dst.WriteByte(ch)
+
+ // .moredigits
+ if prec > 0 {
+ dst.WriteByte('.')
+ i := 1
+ m := min(d.nd, prec+1)
+ if i < m {
+ dst.Write(d.d[i:m])
+ i = m
+ }
+ for i <= prec {
+ dst.WriteByte('0')
+ i++
+ }
+ }
+
+ // e±
+ dst.WriteByte(fmt)
+ exp := d.dp - 1
+ if d.nd == 0 { // special case: 0 has exponent 0
+ exp = 0
+ }
+ if exp < 0 {
+ ch = '-'
+ exp = -exp
+ } else {
+ ch = '+'
+ }
+ dst.WriteByte(ch)
+
+ // dd or ddd
+ switch {
+ case exp < 10:
+ dst.WriteByte('0')
+ dst.WriteByte(byte(exp) + '0')
+ case exp < 100:
+ dst.WriteByte(byte(exp/10) + '0')
+ dst.WriteByte(byte(exp%10) + '0')
+ default:
+ dst.WriteByte(byte(exp/100) + '0')
+ dst.WriteByte(byte(exp/10)%10 + '0')
+ dst.WriteByte(byte(exp%10) + '0')
+ }
+
+ return
+}
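+
+// Worked example (illustrative): for the value 15, the shortest digits are
+// d.d = "15", d.nd = 2, d.dp = 2, so exp = d.dp-1 = 1; with prec = 1 the
+// output is "1.5e+01".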
+
+// %f: -ddddddd.ddddd
+func fmtF(dst EncodingBuffer, neg bool, d decimalSlice, prec int) {
+ // sign
+ if neg {
+ dst.WriteByte('-')
+ }
+
+ // integer, padded with zeros as needed.
+ if d.dp > 0 {
+ m := min(d.nd, d.dp)
+ dst.Write(d.d[:m])
+ for ; m < d.dp; m++ {
+ dst.WriteByte('0')
+ }
+ } else {
+ dst.WriteByte('0')
+ }
+
+ // fraction
+ if prec > 0 {
+ dst.WriteByte('.')
+ for i := 0; i < prec; i++ {
+ ch := byte('0')
+ if j := d.dp + i; 0 <= j && j < d.nd {
+ ch = d.d[j]
+ }
+ dst.WriteByte(ch)
+ }
+ }
+
+ return
+}
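+
+// Worked example (illustrative): with d.d = "1234", d.nd = 4, d.dp = 2
+// (the value 12.34) and prec = 3, the output is "12.340": the fraction is
+// zero-padded past the stored digits.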
+
+// %b: -ddddddddp±ddd
+func fmtB(dst EncodingBuffer, neg bool, mant uint64, exp int, flt *floatInfo) {
+ // sign
+ if neg {
+ dst.WriteByte('-')
+ }
+
+ // mantissa
+ formatBits(dst, mant, 10, false)
+
+ // p
+ dst.WriteByte('p')
+
+ // ±exponent
+ exp -= int(flt.mantbits)
+ if exp >= 0 {
+ dst.WriteByte('+')
+ }
+ formatBits(dst, uint64(exp), 10, exp < 0)
+
+ return
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// formatBits computes the string representation of u in the given base.
+// If neg is set, u is treated as a negative int64 value.
+func formatBits(dst EncodingBuffer, u uint64, base int, neg bool) {
+ if base < 2 || base > len(digits) {
+ panic("strconv: illegal AppendInt/FormatInt base")
+ }
+ // 2 <= base && base <= len(digits)
+
+ var a [64 + 1]byte // +1 for sign of 64bit value in base 2
+ i := len(a)
+
+ if neg {
+ u = -u
+ }
+
+ // convert bits
+ if base == 10 {
+ // common case: use constants for / because
+ // the compiler can optimize it into a multiply+shift
+
+ if ^uintptr(0)>>32 == 0 {
+ for u > uint64(^uintptr(0)) {
+ q := u / 1e9
+ us := uintptr(u - q*1e9) // us % 1e9 fits into a uintptr
+ for j := 9; j > 0; j-- {
+ i--
+ qs := us / 10
+ a[i] = byte(us - qs*10 + '0')
+ us = qs
+ }
+ u = q
+ }
+ }
+
+ // u guaranteed to fit into a uintptr
+ us := uintptr(u)
+ for us >= 10 {
+ i--
+ q := us / 10
+ a[i] = byte(us - q*10 + '0')
+ us = q
+ }
+ // u < 10
+ i--
+ a[i] = byte(us + '0')
+
+ } else if s := shifts[base]; s > 0 {
+ // base is power of 2: use shifts and masks instead of / and %
+ b := uint64(base)
+ m := uintptr(b) - 1 // == 1<<s - 1
+ for u >= b {
+ i--
+ a[i] = digits[uintptr(u)&m]
+ u >>= s
+ }
+ // u < base
+ i--
+ a[i] = digits[uintptr(u)]
+
+ } else {
+ // general case
+ b := uint64(base)
+ for u >= b {
+ i--
+ q := u / b
+ a[i] = digits[uintptr(u-q*b)]
+ u = q
+ }
+ // u < base
+ i--
+ a[i] = digits[uintptr(u)]
+ }
+
+ // add sign, if any
+ if neg {
+ i--
+ a[i] = '-'
+ }
+
+ dst.Write(a[i:])
+}
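+
+// Worked example (illustrative): for a power-of-two base the shift/mask
+// branch is taken, so formatBits(dst, 255, 16, false) writes "ff"; base 10
+// takes the divide-by-1e9 fast path on 32-bit platforms.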
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go
new file mode 100644
index 000000000..46c1289ec
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go
@@ -0,0 +1,936 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are based on Go stdlib's strconv/atof.go */
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// decimal to binary floating point conversion.
+// Algorithm:
+// 1) Store input in multiprecision decimal.
+// 2) Multiply/divide decimal by powers of two until in range [0.5, 1)
+// 3) Multiply by 2^precision and round to get mantissa.
+
+import "math"
+
+var optimize = true // can change for testing
+
+func equalIgnoreCase(s1 []byte, s2 []byte) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := 0; i < len(s1); i++ {
+ c1 := s1[i]
+ if 'A' <= c1 && c1 <= 'Z' {
+ c1 += 'a' - 'A'
+ }
+ c2 := s2[i]
+ if 'A' <= c2 && c2 <= 'Z' {
+ c2 += 'a' - 'A'
+ }
+ if c1 != c2 {
+ return false
+ }
+ }
+ return true
+}
+
+func special(s []byte) (f float64, ok bool) {
+ if len(s) == 0 {
+ return
+ }
+ switch s[0] {
+ default:
+ return
+ case '+':
+ if equalIgnoreCase(s, []byte("+inf")) || equalIgnoreCase(s, []byte("+infinity")) {
+ return math.Inf(1), true
+ }
+ case '-':
+ if equalIgnoreCase(s, []byte("-inf")) || equalIgnoreCase(s, []byte("-infinity")) {
+ return math.Inf(-1), true
+ }
+ case 'n', 'N':
+ if equalIgnoreCase(s, []byte("nan")) {
+ return math.NaN(), true
+ }
+ case 'i', 'I':
+ if equalIgnoreCase(s, []byte("inf")) || equalIgnoreCase(s, []byte("infinity")) {
+ return math.Inf(1), true
+ }
+ }
+ return
+}
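+
+// Illustrative examples (not upstream code):
+//
+//   special([]byte("+Inf"))     // math.Inf(1), true (matching is case-insensitive)
+//   special([]byte("nan"))      // math.NaN(), true
+//   special([]byte("infinity")) // math.Inf(1), true
+//   special([]byte("1.5"))      // 0, false: not a special value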
+
+func (b *decimal) set(s []byte) (ok bool) {
+ i := 0
+ b.neg = false
+ b.trunc = false
+
+ // optional sign
+ if i >= len(s) {
+ return
+ }
+ switch {
+ case s[i] == '+':
+ i++
+ case s[i] == '-':
+ b.neg = true
+ i++
+ }
+
+ // digits
+ sawdot := false
+ sawdigits := false
+ for ; i < len(s); i++ {
+ switch {
+ case s[i] == '.':
+ if sawdot {
+ return
+ }
+ sawdot = true
+ b.dp = b.nd
+ continue
+
+ case '0' <= s[i] && s[i] <= '9':
+ sawdigits = true
+ if s[i] == '0' && b.nd == 0 { // ignore leading zeros
+ b.dp--
+ continue
+ }
+ if b.nd < len(b.d) {
+ b.d[b.nd] = s[i]
+ b.nd++
+ } else if s[i] != '0' {
+ b.trunc = true
+ }
+ continue
+ }
+ break
+ }
+ if !sawdigits {
+ return
+ }
+ if !sawdot {
+ b.dp = b.nd
+ }
+
+ // optional exponent moves decimal point.
+ // if we read a very large, very long number,
+ // just be sure to move the decimal point by
+ // a lot (say, 100000). it doesn't matter if it's
+ // not the exact number.
+ if i < len(s) && (s[i] == 'e' || s[i] == 'E') {
+ i++
+ if i >= len(s) {
+ return
+ }
+ esign := 1
+ if s[i] == '+' {
+ i++
+ } else if s[i] == '-' {
+ i++
+ esign = -1
+ }
+ if i >= len(s) || s[i] < '0' || s[i] > '9' {
+ return
+ }
+ e := 0
+ for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
+ if e < 10000 {
+ e = e*10 + int(s[i]) - '0'
+ }
+ }
+ b.dp += e * esign
+ }
+
+ if i != len(s) {
+ return
+ }
+
+ ok = true
+ return
+}
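+
+// Worked example (illustrative): b.set([]byte("12.34e2")) stores the digits
+// d = "1234" with nd = 4; the dot records dp = 2 and the exponent then moves
+// it to dp = 4, i.e. the value 1234.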
+
+// readFloat reads a decimal mantissa and exponent from a float
+// string representation. It sets ok to false if the number does
+// not fit the return types or is invalid.
+func readFloat(s []byte) (mantissa uint64, exp int, neg, trunc, ok bool) {
+ const uint64digits = 19
+ i := 0
+
+ // optional sign
+ if i >= len(s) {
+ return
+ }
+ switch {
+ case s[i] == '+':
+ i++
+ case s[i] == '-':
+ neg = true
+ i++
+ }
+
+ // digits
+ sawdot := false
+ sawdigits := false
+ nd := 0
+ ndMant := 0
+ dp := 0
+ for ; i < len(s); i++ {
+ switch c := s[i]; true {
+ case c == '.':
+ if sawdot {
+ return
+ }
+ sawdot = true
+ dp = nd
+ continue
+
+ case '0' <= c && c <= '9':
+ sawdigits = true
+ if c == '0' && nd == 0 { // ignore leading zeros
+ dp--
+ continue
+ }
+ nd++
+ if ndMant < uint64digits {
+ mantissa *= 10
+ mantissa += uint64(c - '0')
+ ndMant++
+ } else if s[i] != '0' {
+ trunc = true
+ }
+ continue
+ }
+ break
+ }
+ if !sawdigits {
+ return
+ }
+ if !sawdot {
+ dp = nd
+ }
+
+ // optional exponent moves decimal point.
+ // if we read a very large, very long number,
+ // just be sure to move the decimal point by
+ // a lot (say, 100000). it doesn't matter if it's
+ // not the exact number.
+ if i < len(s) && (s[i] == 'e' || s[i] == 'E') {
+ i++
+ if i >= len(s) {
+ return
+ }
+ esign := 1
+ if s[i] == '+' {
+ i++
+ } else if s[i] == '-' {
+ i++
+ esign = -1
+ }
+ if i >= len(s) || s[i] < '0' || s[i] > '9' {
+ return
+ }
+ e := 0
+ for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
+ if e < 10000 {
+ e = e*10 + int(s[i]) - '0'
+ }
+ }
+ dp += e * esign
+ }
+
+ if i != len(s) {
+ return
+ }
+
+ exp = dp - ndMant
+ ok = true
+ return
+
+}
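+
+// Worked example (illustrative): readFloat([]byte("1.25e2")) returns
+// mantissa = 125, exp = 0 (the exponent moves dp from 1 to 3, and
+// exp = dp - ndMant = 3 - 3), neg = false, trunc = false, ok = true.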
+
+// decimal power of ten to binary power of two.
+var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26}
+
+func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) {
+ var exp int
+ var mant uint64
+
+ // Zero is always a special case.
+ if d.nd == 0 {
+ mant = 0
+ exp = flt.bias
+ goto out
+ }
+
+ // Obvious overflow/underflow.
+ // These bounds are for 64-bit floats.
+ // Will have to change if we want to support 80-bit floats in the future.
+ if d.dp > 310 {
+ goto overflow
+ }
+ if d.dp < -330 {
+ // zero
+ mant = 0
+ exp = flt.bias
+ goto out
+ }
+
+ // Scale by powers of two until in range [0.5, 1.0)
+ exp = 0
+ for d.dp > 0 {
+ var n int
+ if d.dp >= len(powtab) {
+ n = 27
+ } else {
+ n = powtab[d.dp]
+ }
+ d.Shift(-n)
+ exp += n
+ }
+ for d.dp < 0 || d.dp == 0 && d.d[0] < '5' {
+ var n int
+ if -d.dp >= len(powtab) {
+ n = 27
+ } else {
+ n = powtab[-d.dp]
+ }
+ d.Shift(n)
+ exp -= n
+ }
+
+ // Our range is [0.5,1) but floating point range is [1,2).
+ exp--
+
+ // Minimum representable exponent is flt.bias+1.
+ // If the exponent is smaller, move it up and
+ // adjust d accordingly.
+ if exp < flt.bias+1 {
+ n := flt.bias + 1 - exp
+ d.Shift(-n)
+ exp += n
+ }
+
+ if exp-flt.bias >= 1<<flt.expbits-1 {
+ goto overflow
+ }
+
+ // Extract 1+flt.mantbits bits.
+ d.Shift(int(1 + flt.mantbits))
+ mant = d.RoundedInteger()
+
+ // Rounding might have added a bit; shift down.
+ if mant == 2<<flt.mantbits {
+ mant >>= 1
+ exp++
+ if exp-flt.bias >= 1<<flt.expbits-1 {
+ goto overflow
+ }
+ }
+
+ // Denormalized?
+ if mant&(1<<flt.mantbits) == 0 {
+ exp = flt.bias
+ }
+ goto out
+
+overflow:
+ // ±Inf
+ mant = 0
+ exp = 1<<flt.expbits - 1 + flt.bias
+ overflow = true
+
+out:
+ // Assemble bits.
+ bits := mant & (uint64(1)<<flt.mantbits - 1)
+ bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
+ if d.neg {
+ bits |= 1 << flt.mantbits << flt.expbits
+ }
+ return bits, overflow
+}
+
+// Exact powers of 10.
+var float64pow10 = []float64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22,
+}
+var float32pow10 = []float32{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10}
+
+// If it is possible to convert the decimal representation to a 64-bit float f
+// exactly, entirely in floating-point math, do so, avoiding the expense of the
+// multiprecision fallback.
+// Three common cases:
+// value is exact integer
+// value is exact integer * exact power of ten
+// value is exact integer / exact power of ten
+// These all produce potentially inexact but correctly rounded answers.
+func atof64exact(mantissa uint64, exp int, neg bool) (f float64, ok bool) {
+ if mantissa>>float64info.mantbits != 0 {
+ return
+ }
+ f = float64(mantissa)
+ if neg {
+ f = -f
+ }
+ switch {
+ case exp == 0:
+ // an integer.
+ return f, true
+ // Exact integers are <= 10^15.
+ // Exact powers of ten are <= 10^22.
+ case exp > 0 && exp <= 15+22: // int * 10^k
+ // If exponent is big but number of digits is not,
+ // can move a few zeros into the integer part.
+ if exp > 22 {
+ f *= float64pow10[exp-22]
+ exp = 22
+ }
+ if f > 1e15 || f < -1e15 {
+ // the exponent was really too large.
+ return
+ }
+ return f * float64pow10[exp], true
+ case exp < 0 && exp >= -22: // int / 10^k
+ return f / float64pow10[-exp], true
+ }
+ return
+}
+
+// If it is possible to compute mantissa*10^exp as a 32-bit float f exactly,
+// entirely in floating-point math, do so, avoiding the machinery above.
+func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) {
+ if mantissa>>float32info.mantbits != 0 {
+ return
+ }
+ f = float32(mantissa)
+ if neg {
+ f = -f
+ }
+ switch {
+ case exp == 0:
+ return f, true
+ // Exact integers are <= 10^7.
+ // Exact powers of ten are <= 10^10.
+ case exp > 0 && exp <= 7+10: // int * 10^k
+ // If exponent is big but number of digits is not,
+ // can move a few zeros into the integer part.
+ if exp > 10 {
+ f *= float32pow10[exp-10]
+ exp = 10
+ }
+ if f > 1e7 || f < -1e7 {
+ // the exponent was really too large.
+ return
+ }
+ return f * float32pow10[exp], true
+ case exp < 0 && exp >= -10: // int / 10^k
+ return f / float32pow10[-exp], true
+ }
+ return
+}
+
+const fnParseFloat = "ParseFloat"
+
+func atof32(s []byte) (f float32, err error) {
+ if val, ok := special(s); ok {
+ return float32(val), nil
+ }
+
+ if optimize {
+ // Parse mantissa and exponent.
+ mantissa, exp, neg, trunc, ok := readFloat(s)
+ if ok {
+ // Try pure floating-point arithmetic conversion.
+ if !trunc {
+ if f, ok := atof32exact(mantissa, exp, neg); ok {
+ return f, nil
+ }
+ }
+ // Try another fast path.
+ ext := new(extFloat)
+ if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float32info); ok {
+ b, ovf := ext.floatBits(&float32info)
+ f = math.Float32frombits(uint32(b))
+ if ovf {
+ err = rangeError(fnParseFloat, string(s))
+ }
+ return f, err
+ }
+ }
+ }
+ var d decimal
+ if !d.set(s) {
+ return 0, syntaxError(fnParseFloat, string(s))
+ }
+ b, ovf := d.floatBits(&float32info)
+ f = math.Float32frombits(uint32(b))
+ if ovf {
+ err = rangeError(fnParseFloat, string(s))
+ }
+ return f, err
+}
+
+func atof64(s []byte) (f float64, err error) {
+ if val, ok := special(s); ok {
+ return val, nil
+ }
+
+ if optimize {
+ // Parse mantissa and exponent.
+ mantissa, exp, neg, trunc, ok := readFloat(s)
+ if ok {
+ // Try pure floating-point arithmetic conversion.
+ if !trunc {
+ if f, ok := atof64exact(mantissa, exp, neg); ok {
+ return f, nil
+ }
+ }
+ // Try another fast path.
+ ext := new(extFloat)
+ if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float64info); ok {
+ b, ovf := ext.floatBits(&float64info)
+ f = math.Float64frombits(b)
+ if ovf {
+ err = rangeError(fnParseFloat, string(s))
+ }
+ return f, err
+ }
+ }
+ }
+ var d decimal
+ if !d.set(s) {
+ return 0, syntaxError(fnParseFloat, string(s))
+ }
+ b, ovf := d.floatBits(&float64info)
+ f = math.Float64frombits(b)
+ if ovf {
+ err = rangeError(fnParseFloat, string(s))
+ }
+ return f, err
+}
+
+// ParseFloat converts the string s to a floating-point number
+// with the precision specified by bitSize: 32 for float32, or 64 for float64.
+// When bitSize=32, the result still has type float64, but it will be
+// convertible to float32 without changing its value.
+//
+// If s is well-formed and near a valid floating point number,
+// ParseFloat returns the nearest floating point number rounded
+// using IEEE754 unbiased rounding.
+//
+// The errors that ParseFloat returns have concrete type *NumError
+// and include err.Num = s.
+//
+// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax.
+//
+// If s is syntactically well-formed but is more than 1/2 ULP
+// away from the largest floating point number of the given size,
+// ParseFloat returns f = ±Inf, err.Err = ErrRange.
+func ParseFloat(s []byte, bitSize int) (f float64, err error) {
+ if bitSize == 32 {
+ f1, err1 := atof32(s)
+ return float64(f1), err1
+ }
+ f1, err1 := atof64(s)
+ return f1, err1
+}
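+
+// Usage sketch (illustrative, not upstream code):
+//
+//   f, err := ParseFloat([]byte("3.5e-2"), 64) // f == 0.035, err == nil
+//   f, err = ParseFloat([]byte("1e400"), 64)   // f == +Inf, err is a *NumError wrapping ErrRange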
+
+// original: strconv/decimal.go, but not exported, and needed for ParseFloat.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Multiprecision decimal numbers.
+// For floating-point formatting only; not general purpose.
+// Only operations are assign and (binary) left/right shift.
+// Can do binary floating point in multiprecision decimal precisely
+// because 2 divides 10; cannot do decimal floating point
+// in multiprecision binary precisely.
+
+type decimal struct {
+ d [800]byte // digits
+ nd int // number of digits used
+ dp int // decimal point
+ neg bool
+ trunc bool // discarded nonzero digits beyond d[:nd]
+}
+
+func (a *decimal) String() string {
+ n := 10 + a.nd
+ if a.dp > 0 {
+ n += a.dp
+ }
+ if a.dp < 0 {
+ n += -a.dp
+ }
+
+ buf := make([]byte, n)
+ w := 0
+ switch {
+ case a.nd == 0:
+ return "0"
+
+ case a.dp <= 0:
+ // zeros fill space between decimal point and digits
+ buf[w] = '0'
+ w++
+ buf[w] = '.'
+ w++
+ w += digitZero(buf[w : w+-a.dp])
+ w += copy(buf[w:], a.d[0:a.nd])
+
+ case a.dp < a.nd:
+ // decimal point in middle of digits
+ w += copy(buf[w:], a.d[0:a.dp])
+ buf[w] = '.'
+ w++
+ w += copy(buf[w:], a.d[a.dp:a.nd])
+
+ default:
+ // zeros fill space between digits and decimal point
+ w += copy(buf[w:], a.d[0:a.nd])
+ w += digitZero(buf[w : w+a.dp-a.nd])
+ }
+ return string(buf[0:w])
+}
+
+func digitZero(dst []byte) int {
+ for i := range dst {
+ dst[i] = '0'
+ }
+ return len(dst)
+}
+
+// trim trailing zeros from number.
+// (They are meaningless; the decimal point is tracked
+// independent of the number of digits.)
+func trim(a *decimal) {
+ for a.nd > 0 && a.d[a.nd-1] == '0' {
+ a.nd--
+ }
+ if a.nd == 0 {
+ a.dp = 0
+ }
+}
+
+// Assign v to a.
+func (a *decimal) Assign(v uint64) {
+ var buf [24]byte
+
+ // Write reversed decimal in buf.
+ n := 0
+ for v > 0 {
+ v1 := v / 10
+ v -= 10 * v1
+ buf[n] = byte(v + '0')
+ n++
+ v = v1
+ }
+
+ // Reverse again to produce forward decimal in a.d.
+ a.nd = 0
+ for n--; n >= 0; n-- {
+ a.d[a.nd] = buf[n]
+ a.nd++
+ }
+ a.dp = a.nd
+ trim(a)
+}
+
+// Maximum shift that we can do in one pass without overflow.
+// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
+const maxShift = 27
+
+// Binary shift right (divide by 2**k) by k bits. k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+ r := 0 // read pointer
+ w := 0 // write pointer
+
+ // Pick up enough leading digits to cover first shift.
+ n := 0
+ for ; n>>k == 0; r++ {
+ if r >= a.nd {
+ if n == 0 {
+ // a == 0; shouldn't get here, but handle anyway.
+ a.nd = 0
+ return
+ }
+ for n>>k == 0 {
+ n = n * 10
+ r++
+ }
+ break
+ }
+ c := int(a.d[r])
+ n = n*10 + c - '0'
+ }
+ a.dp -= r - 1
+
+ // Pick up a digit, put down a digit.
+ for ; r < a.nd; r++ {
+ c := int(a.d[r])
+ dig := n >> k
+ n -= dig << k
+ a.d[w] = byte(dig + '0')
+ w++
+ n = n*10 + c - '0'
+ }
+
+ // Put down extra digits.
+ for n > 0 {
+ dig := n >> k
+ n -= dig << k
+ if w < len(a.d) {
+ a.d[w] = byte(dig + '0')
+ w++
+ } else if dig > 0 {
+ a.trunc = true
+ }
+ n = n * 10
+ }
+
+ a.nd = w
+ trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
+
+type leftCheat struct {
+ delta int // number of new digits
+ cutoff string // minus one digit if the original digits are lexicographically less than cutoff.
+}
+
+var leftcheats = []leftCheat{
+ // Leading digits of 1/2^i = 5^i.
+ // 5^23 is not an exact 64-bit floating point number,
+ // so have to use bc for the math.
+ /*
+ seq 27 | sed 's/^/5^/' | bc |
+ awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," }
+ {
+ log2 = log(2)/log(10)
+ printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n",
+ int(log2*NR+1), $0, 2**NR)
+ }'
+ */
+ {0, ""},
+ {1, "5"}, // * 2
+ {1, "25"}, // * 4
+ {1, "125"}, // * 8
+ {2, "625"}, // * 16
+ {2, "3125"}, // * 32
+ {2, "15625"}, // * 64
+ {3, "78125"}, // * 128
+ {3, "390625"}, // * 256
+ {3, "1953125"}, // * 512
+ {4, "9765625"}, // * 1024
+ {4, "48828125"}, // * 2048
+ {4, "244140625"}, // * 4096
+ {4, "1220703125"}, // * 8192
+ {5, "6103515625"}, // * 16384
+ {5, "30517578125"}, // * 32768
+ {5, "152587890625"}, // * 65536
+ {6, "762939453125"}, // * 131072
+ {6, "3814697265625"}, // * 262144
+ {6, "19073486328125"}, // * 524288
+ {7, "95367431640625"}, // * 1048576
+ {7, "476837158203125"}, // * 2097152
+ {7, "2384185791015625"}, // * 4194304
+ {7, "11920928955078125"}, // * 8388608
+ {8, "59604644775390625"}, // * 16777216
+ {8, "298023223876953125"}, // * 33554432
+ {8, "1490116119384765625"}, // * 67108864
+ {9, "7450580596923828125"}, // * 134217728
+}
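+
+// Worked example (illustrative): leftcheats[4] = {2, "625"} because
+// multiplying by 16 adds two digits exactly when the leading digits reach
+// "625": 625*16 = 10000 (three digits become five), while 624*16 = 9984
+// only adds one digit.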
+
+// Is the leading prefix of b lexicographically less than s?
+func prefixIsLessThan(b []byte, s string) bool {
+ for i := 0; i < len(s); i++ {
+ if i >= len(b) {
+ return true
+ }
+ if b[i] != s[i] {
+ return b[i] < s[i]
+ }
+ }
+ return false
+}
+
+// Binary shift left (multiply by 2**k) by k bits. k <= maxShift to avoid overflow.
+func leftShift(a *decimal, k uint) {
+ delta := leftcheats[k].delta
+ if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
+ delta--
+ }
+
+ r := a.nd // read index
+ w := a.nd + delta // write index
+ n := 0
+
+ // Pick up a digit, put down a digit.
+ for r--; r >= 0; r-- {
+ n += (int(a.d[r]) - '0') << k
+ quo := n / 10
+ rem := n - 10*quo
+ w--
+ if w < len(a.d) {
+ a.d[w] = byte(rem + '0')
+ } else if rem != 0 {
+ a.trunc = true
+ }
+ n = quo
+ }
+
+ // Put down extra digits.
+ for n > 0 {
+ quo := n / 10
+ rem := n - 10*quo
+ w--
+ if w < len(a.d) {
+ a.d[w] = byte(rem + '0')
+ } else if rem != 0 {
+ a.trunc = true
+ }
+ n = quo
+ }
+
+ a.nd += delta
+ if a.nd >= len(a.d) {
+ a.nd = len(a.d)
+ }
+ a.dp += delta
+ trim(a)
+}
+
+// Binary shift left (k > 0) or right (k < 0).
+func (a *decimal) Shift(k int) {
+ switch {
+ case a.nd == 0:
+ // nothing to do: a == 0
+ case k > 0:
+ for k > maxShift {
+ leftShift(a, maxShift)
+ k -= maxShift
+ }
+ leftShift(a, uint(k))
+ case k < 0:
+ for k < -maxShift {
+ rightShift(a, maxShift)
+ k += maxShift
+ }
+ rightShift(a, uint(-k))
+ }
+}
+
+// If we chop a at nd digits, should we round up?
+func shouldRoundUp(a *decimal, nd int) bool {
+ if nd < 0 || nd >= a.nd {
+ return false
+ }
+ if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even
+ // if we truncated, a little higher than what's recorded - always round up
+ if a.trunc {
+ return true
+ }
+ return nd > 0 && (a.d[nd-1]-'0')%2 != 0
+ }
+ // not halfway - digit tells all
+ return a.d[nd] >= '5'
+}
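+
+// Worked example (illustrative): chopping "25" to one digit is exactly
+// halfway; with no truncated digits, round-to-even keeps "2" ('2' is even),
+// while chopping "35" rounds up to "4" ('3' is odd).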
+
+// Round a to nd digits (or fewer).
+// If nd is zero, it means we're rounding
+// just to the left of the digits, as in
+// 0.09 -> 0.1.
+func (a *decimal) Round(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+ if shouldRoundUp(a, nd) {
+ a.RoundUp(nd)
+ } else {
+ a.RoundDown(nd)
+ }
+}
+
+// Round a down to nd digits (or fewer).
+func (a *decimal) RoundDown(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+ a.nd = nd
+ trim(a)
+}
+
+// Round a up to nd digits (or fewer).
+func (a *decimal) RoundUp(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+
+ // round up
+ for i := nd - 1; i >= 0; i-- {
+ c := a.d[i]
+ if c < '9' { // can stop after this digit
+ a.d[i]++
+ a.nd = i + 1
+ return
+ }
+ }
+
+ // Number is all 9s.
+ // Change to single 1 with adjusted decimal point.
+ a.d[0] = '1'
+ a.nd = 1
+ a.dp++
+}
+
+// Extract integer part, rounded appropriately.
+// No guarantees about overflow.
+func (a *decimal) RoundedInteger() uint64 {
+ if a.dp > 20 {
+ return 0xFFFFFFFFFFFFFFFF
+ }
+ var i int
+ n := uint64(0)
+ for i = 0; i < a.dp && i < a.nd; i++ {
+ n = n*10 + uint64(a.d[i]-'0')
+ }
+ for ; i < a.dp; i++ {
+ n *= 10
+ }
+ if shouldRoundUp(a, a.dp) {
+ n++
+ }
+ return n
+}
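+
+// Worked example (illustrative): for a decimal holding d = "1275" with
+// dp = 3 (the value 127.5), RoundedInteger returns 128: the digit after the
+// decimal point is '5' and the last integer digit '7' is odd, so
+// shouldRoundUp rounds to even.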
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go
new file mode 100644
index 000000000..06eb2ec29
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go
@@ -0,0 +1,213 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are on Go stdlib's strconv/atoi.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "errors"
+ "strconv"
+)
+
+// ErrRange indicates that a value is out of range for the target type.
+var ErrRange = errors.New("value out of range")
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// A NumError records a failed conversion.
+type NumError struct {
+ Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat)
+ Num string // the input
+ Err error // the reason the conversion failed (ErrRange, ErrSyntax)
+}
+
+func (e *NumError) Error() string {
+ return "strconv." + e.Func + ": " + "parsing " + strconv.Quote(e.Num) + ": " + e.Err.Error()
+}
+
+func syntaxError(fn, str string) *NumError {
+ return &NumError{fn, str, ErrSyntax}
+}
+
+func rangeError(fn, str string) *NumError {
+ return &NumError{fn, str, ErrRange}
+}
+
+const intSize = 32 << uint(^uint(0)>>63)
+
+// IntSize is the size in bits of an int or uint value.
+const IntSize = intSize
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
+
+// ParseUint is like ParseInt but for unsigned numbers, and operates on []byte.
+func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &NumError{"ParseUint", string(s0), err}
+}
+
+// ParseInt interprets a string s in the given base (2 to 36) and
+// returns the corresponding value i. If base == 0, the base is
+// implied by the string's prefix: base 16 for "0x", base 8 for
+// "0", and base 10 otherwise.
+//
+// The bitSize argument specifies the integer type
+// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
+// correspond to int, int8, int16, int32, and int64.
+//
+// The errors that ParseInt returns have concrete type *NumError
+// and include err.Num = s. If s is empty or contains invalid
+// digits, err.Err = ErrSyntax and the returned value is 0;
+// if the value corresponding to s cannot be represented by a
+// signed integer of the given size, err.Err = ErrRange and the
+// returned value is the maximum magnitude integer of the
+// appropriate bitSize and sign.
+func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
+ const fnParseInt = "ParseInt"
+
+ if bitSize == 0 {
+ bitSize = int(IntSize)
+ }
+
+ // Empty string bad.
+ if len(s) == 0 {
+ return 0, syntaxError(fnParseInt, string(s))
+ }
+
+ // Pick off leading sign.
+ s0 := s
+ neg := false
+ if s[0] == '+' {
+ s = s[1:]
+ } else if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ }
+
+ // Convert unsigned and check range.
+ var un uint64
+ un, err = ParseUint(s, base, bitSize)
+ if err != nil && err.(*NumError).Err != ErrRange {
+ err.(*NumError).Func = fnParseInt
+ err.(*NumError).Num = string(s0)
+ return 0, err
+ }
+ cutoff := uint64(1 << uint(bitSize-1))
+ if !neg && un >= cutoff {
+ return int64(cutoff - 1), rangeError(fnParseInt, string(s0))
+ }
+ if neg && un > cutoff {
+ return -int64(cutoff), rangeError(fnParseInt, string(s0))
+ }
+ n := int64(un)
+ if neg {
+ n = -n
+ }
+ return n, nil
+}
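+
+// exampleParse is an illustrative sketch, not part of the upstream source:
+// it exercises the parsers above in each of the three base modes.
+func exampleParse() {
+ u, _ := ParseUint([]byte("ff"), 16, 8)   // u == 255
+ i, _ := ParseInt([]byte("-0x1f"), 0, 64) // i == -31: base inferred from the "0x" prefix
+ _, err := ParseInt([]byte("200"), 10, 8) // 127 with a *NumError wrapping ErrRange
+ _, _, _ = u, i, err
+}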
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
new file mode 100644
index 000000000..ab791085a
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
@@ -0,0 +1,668 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// An extFloat represents an extended floating-point number, with more
+// precision than a float64. It does not try to save bits: the
+// number represented by the structure is mant*(2^exp), with a negative
+// sign if neg is true.
+type extFloat struct {
+ mant uint64
+ exp int
+ neg bool
+}
+
+// Powers of ten taken from double-conversion library.
+// http://code.google.com/p/double-conversion/
+const (
+ firstPowerOfTen = -348
+ stepPowerOfTen = 8
+)
+
+var smallPowersOfTen = [...]extFloat{
+ {1 << 63, -63, false}, // 1
+ {0xa << 60, -60, false}, // 1e1
+ {0x64 << 57, -57, false}, // 1e2
+ {0x3e8 << 54, -54, false}, // 1e3
+ {0x2710 << 50, -50, false}, // 1e4
+ {0x186a0 << 47, -47, false}, // 1e5
+ {0xf4240 << 44, -44, false}, // 1e6
+ {0x989680 << 40, -40, false}, // 1e7
+}
+
+var powersOfTen = [...]extFloat{
+ {0xfa8fd5a0081c0288, -1220, false}, // 10^-348
+ {0xbaaee17fa23ebf76, -1193, false}, // 10^-340
+ {0x8b16fb203055ac76, -1166, false}, // 10^-332
+ {0xcf42894a5dce35ea, -1140, false}, // 10^-324
+ {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316
+ {0xe61acf033d1a45df, -1087, false}, // 10^-308
+ {0xab70fe17c79ac6ca, -1060, false}, // 10^-300
+ {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292
+ {0xbe5691ef416bd60c, -1007, false}, // 10^-284
+ {0x8dd01fad907ffc3c, -980, false}, // 10^-276
+ {0xd3515c2831559a83, -954, false}, // 10^-268
+ {0x9d71ac8fada6c9b5, -927, false}, // 10^-260
+ {0xea9c227723ee8bcb, -901, false}, // 10^-252
+ {0xaecc49914078536d, -874, false}, // 10^-244
+ {0x823c12795db6ce57, -847, false}, // 10^-236
+ {0xc21094364dfb5637, -821, false}, // 10^-228
+ {0x9096ea6f3848984f, -794, false}, // 10^-220
+ {0xd77485cb25823ac7, -768, false}, // 10^-212
+ {0xa086cfcd97bf97f4, -741, false}, // 10^-204
+ {0xef340a98172aace5, -715, false}, // 10^-196
+ {0xb23867fb2a35b28e, -688, false}, // 10^-188
+ {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180
+ {0xc5dd44271ad3cdba, -635, false}, // 10^-172
+ {0x936b9fcebb25c996, -608, false}, // 10^-164
+ {0xdbac6c247d62a584, -582, false}, // 10^-156
+ {0xa3ab66580d5fdaf6, -555, false}, // 10^-148
+ {0xf3e2f893dec3f126, -529, false}, // 10^-140
+ {0xb5b5ada8aaff80b8, -502, false}, // 10^-132
+ {0x87625f056c7c4a8b, -475, false}, // 10^-124
+ {0xc9bcff6034c13053, -449, false}, // 10^-116
+ {0x964e858c91ba2655, -422, false}, // 10^-108
+ {0xdff9772470297ebd, -396, false}, // 10^-100
+ {0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92
+ {0xf8a95fcf88747d94, -343, false}, // 10^-84
+ {0xb94470938fa89bcf, -316, false}, // 10^-76
+ {0x8a08f0f8bf0f156b, -289, false}, // 10^-68
+ {0xcdb02555653131b6, -263, false}, // 10^-60
+ {0x993fe2c6d07b7fac, -236, false}, // 10^-52
+ {0xe45c10c42a2b3b06, -210, false}, // 10^-44
+ {0xaa242499697392d3, -183, false}, // 10^-36
+ {0xfd87b5f28300ca0e, -157, false}, // 10^-28
+ {0xbce5086492111aeb, -130, false}, // 10^-20
+ {0x8cbccc096f5088cc, -103, false}, // 10^-12
+ {0xd1b71758e219652c, -77, false}, // 10^-4
+ {0x9c40000000000000, -50, false}, // 10^4
+ {0xe8d4a51000000000, -24, false}, // 10^12
+ {0xad78ebc5ac620000, 3, false}, // 10^20
+ {0x813f3978f8940984, 30, false}, // 10^28
+ {0xc097ce7bc90715b3, 56, false}, // 10^36
+ {0x8f7e32ce7bea5c70, 83, false}, // 10^44
+ {0xd5d238a4abe98068, 109, false}, // 10^52
+ {0x9f4f2726179a2245, 136, false}, // 10^60
+ {0xed63a231d4c4fb27, 162, false}, // 10^68
+ {0xb0de65388cc8ada8, 189, false}, // 10^76
+ {0x83c7088e1aab65db, 216, false}, // 10^84
+ {0xc45d1df942711d9a, 242, false}, // 10^92
+ {0x924d692ca61be758, 269, false}, // 10^100
+ {0xda01ee641a708dea, 295, false}, // 10^108
+ {0xa26da3999aef774a, 322, false}, // 10^116
+ {0xf209787bb47d6b85, 348, false}, // 10^124
+ {0xb454e4a179dd1877, 375, false}, // 10^132
+ {0x865b86925b9bc5c2, 402, false}, // 10^140
+ {0xc83553c5c8965d3d, 428, false}, // 10^148
+ {0x952ab45cfa97a0b3, 455, false}, // 10^156
+ {0xde469fbd99a05fe3, 481, false}, // 10^164
+ {0xa59bc234db398c25, 508, false}, // 10^172
+ {0xf6c69a72a3989f5c, 534, false}, // 10^180
+ {0xb7dcbf5354e9bece, 561, false}, // 10^188
+ {0x88fcf317f22241e2, 588, false}, // 10^196
+ {0xcc20ce9bd35c78a5, 614, false}, // 10^204
+ {0x98165af37b2153df, 641, false}, // 10^212
+ {0xe2a0b5dc971f303a, 667, false}, // 10^220
+ {0xa8d9d1535ce3b396, 694, false}, // 10^228
+ {0xfb9b7cd9a4a7443c, 720, false}, // 10^236
+ {0xbb764c4ca7a44410, 747, false}, // 10^244
+ {0x8bab8eefb6409c1a, 774, false}, // 10^252
+ {0xd01fef10a657842c, 800, false}, // 10^260
+ {0x9b10a4e5e9913129, 827, false}, // 10^268
+ {0xe7109bfba19c0c9d, 853, false}, // 10^276
+ {0xac2820d9623bf429, 880, false}, // 10^284
+ {0x80444b5e7aa7cf85, 907, false}, // 10^292
+ {0xbf21e44003acdd2d, 933, false}, // 10^300
+ {0x8e679c2f5e44ff8f, 960, false}, // 10^308
+ {0xd433179d9c8cb841, 986, false}, // 10^316
+ {0x9e19db92b4e31ba9, 1013, false}, // 10^324
+ {0xeb96bf6ebadf77d9, 1039, false}, // 10^332
+ {0xaf87023b9bf0ee6b, 1066, false}, // 10^340
+}
+
+// floatBits returns the bits of the float64 that best approximates
+// the extFloat passed as receiver. Overflow is set to true if
+// the resulting float64 is ±Inf.
+func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) {
+ f.Normalize()
+
+ exp := f.exp + 63
+
+ // Exponent too small.
+ if exp < flt.bias+1 {
+ n := flt.bias + 1 - exp
+ f.mant >>= uint(n)
+ exp += n
+ }
+
+ // Extract 1+flt.mantbits bits from the 64-bit mantissa.
+ mant := f.mant >> (63 - flt.mantbits)
+ if f.mant&(1<<(62-flt.mantbits)) != 0 {
+ // Round up.
+ mant += 1
+ }
+
+ // Rounding might have added a bit; shift down.
+ if mant == 2<<flt.mantbits {
+ mant >>= 1
+ exp++
+ }
+
+ // Infinities.
+ if exp-flt.bias >= 1<<flt.expbits-1 {
+ // ±Inf
+ mant = 0
+ exp = 1<<flt.expbits - 1 + flt.bias
+ overflow = true
+ } else if mant&(1<<flt.mantbits) == 0 {
+ // Denormalized?
+ exp = flt.bias
+ }
+ // Assemble bits.
+ bits = mant & (uint64(1)<<flt.mantbits - 1)
+ bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
+ if f.neg {
+ bits |= 1 << (flt.mantbits + flt.expbits)
+ }
+ return
+}
+
+// AssignComputeBounds sets f to the floating point value
+// defined by mant, exp and precision given by flt. It returns
+// lower, upper such that any number in the closed interval
+// [lower, upper] is converted back to the same floating point number.
+func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) {
+ f.mant = mant
+ f.exp = exp - int(flt.mantbits)
+ f.neg = neg
+ if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) {
+ // An exact integer
+ f.mant >>= uint(-f.exp)
+ f.exp = 0
+ return *f, *f
+ }
+ expBiased := exp - flt.bias
+
+ upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg}
+ if mant != 1<<flt.mantbits || expBiased == 1 {
+ lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg}
+ } else {
+ lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg}
+ }
+ return
+}
+
+// Normalize normalizes f so that the highest bit of the mantissa is
+// set, and returns the number by which the mantissa was left-shifted.
+func (f *extFloat) Normalize() (shift uint) {
+ mant, exp := f.mant, f.exp
+ if mant == 0 {
+ return 0
+ }
+ if mant>>(64-32) == 0 {
+ mant <<= 32
+ exp -= 32
+ }
+ if mant>>(64-16) == 0 {
+ mant <<= 16
+ exp -= 16
+ }
+ if mant>>(64-8) == 0 {
+ mant <<= 8
+ exp -= 8
+ }
+ if mant>>(64-4) == 0 {
+ mant <<= 4
+ exp -= 4
+ }
+ if mant>>(64-2) == 0 {
+ mant <<= 2
+ exp -= 2
+ }
+ if mant>>(64-1) == 0 {
+ mant <<= 1
+ exp -= 1
+ }
+ shift = uint(f.exp - exp)
+ f.mant, f.exp = mant, exp
+ return
+}
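+
+// Worked example (illustrative): for f = extFloat{mant: 1, exp: 0} the
+// cascade shifts by 32+16+8+4+2+1 = 63 bits, leaving mant = 1<<63 and
+// exp = -63, and Normalize returns shift = 63.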
+
+// Multiply sets f to the product f*g: the result is correctly rounded,
+// but not normalized.
+func (f *extFloat) Multiply(g extFloat) {
+ fhi, flo := f.mant>>32, uint64(uint32(f.mant))
+ ghi, glo := g.mant>>32, uint64(uint32(g.mant))
+
+ // Cross products.
+ cross1 := fhi * glo
+ cross2 := flo * ghi
+
+ // f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo
+ f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32)
+ rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32)
+ // Round up.
+ rem += (1 << 31)
+
+ f.mant += (rem >> 32)
+ f.exp = f.exp + g.exp + 64
+}
+
+var uint64pow10 = [...]uint64{
+ 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+}
+
+// AssignDecimal sets f to an approximate value mantissa*10^exp. It
+// returns true if the value represented by f is guaranteed to be the
+// best approximation of the decimal after being rounded to a float64 or
+// float32 depending on flt.
+func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) {
+ const uint64digits = 19
+ const errorscale = 8
+ errors := 0 // An upper bound for error, computed in errorscale*ulp.
+ if trunc {
+ // the decimal number was truncated.
+ errors += errorscale / 2
+ }
+
+ f.mant = mantissa
+ f.exp = 0
+ f.neg = neg
+
+ // Multiply by powers of ten.
+ i := (exp10 - firstPowerOfTen) / stepPowerOfTen
+ if exp10 < firstPowerOfTen || i >= len(powersOfTen) {
+ return false
+ }
+ adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen
+
+ // Multiply by 10^adjExp, the exponent's remainder modulo the table step.
+ if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] {
+ // We can multiply the mantissa exactly.
+ f.mant *= uint64pow10[adjExp]
+ f.Normalize()
+ } else {
+ f.Normalize()
+ f.Multiply(smallPowersOfTen[adjExp])
+ errors += errorscale / 2
+ }
+
+ // Multiply by 10^(exp10 - adjExp) from the powersOfTen table.
+ f.Multiply(powersOfTen[i])
+ if errors > 0 {
+ errors += 1
+ }
+ errors += errorscale / 2
+
+ // Normalize
+ shift := f.Normalize()
+ errors <<= shift
+
+ // Now f is a good approximation of the decimal.
+ // Check whether the error is too large: that is, if the mantissa
+ // is perturbed by the error, the resulting float64 will change.
+ // The 64-bit mantissa is 1 + 52 bits for float64 + 11 extra bits.
+ //
+ // In many cases the approximation will be good enough.
+ denormalExp := flt.bias - 63
+ var extrabits uint
+ if f.exp <= denormalExp {
+ // f.mant * 2^f.exp is smaller than 2^(flt.bias+1).
+ extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp))
+ } else {
+ extrabits = uint(63 - flt.mantbits)
+ }
+
+ halfway := uint64(1) << (extrabits - 1)
+ mant_extra := f.mant & (1<<extrabits - 1)
+
+ // Do a signed comparison here! If the error estimate could make
+ // the mantissa round differently for the conversion to double,
+ // then we can't give a definite answer.
+ if int64(halfway)-int64(errors) < int64(mant_extra) &&
+ int64(mant_extra) < int64(halfway)+int64(errors) {
+ return false
+ }
+ return true
+}
+
+// frexp10 is an analogue of math.Frexp for decimal powers. It scales
+// f by an approximate power of ten 10^-exp, and returns exp10, so
+// that f*10^exp10 has the same value as the old f, up to an ulp,
+// as well as the index of 10^-exp in the powersOfTen table.
+func (f *extFloat) frexp10() (exp10, index int) {
+ // The constants expMin and expMax constrain the final value of the
+ // binary exponent of f. We want a small integral part in the result
+ // because finding digits of an integer requires divisions, whereas
+ // digits of the fractional part can be found by repeatedly multiplying
+ // by 10.
+ const expMin = -60
+ const expMax = -32
+ // Find power of ten such that x * 10^n has a binary exponent
+ // between expMin and expMax.
+ approxExp10 := ((expMin+expMax)/2 - f.exp) * 28 / 93 // log(10)/log(2) is close to 93/28.
+ i := (approxExp10 - firstPowerOfTen) / stepPowerOfTen
+Loop:
+ for {
+ exp := f.exp + powersOfTen[i].exp + 64
+ switch {
+ case exp < expMin:
+ i++
+ case exp > expMax:
+ i--
+ default:
+ break Loop
+ }
+ }
+ // Apply the desired decimal shift on f. It will have exponent
+ // in the desired range. This is multiplication by 10^-exp10.
+ f.Multiply(powersOfTen[i])
+
+ return -(firstPowerOfTen + i*stepPowerOfTen), i
+}
+
+// frexp10Many applies a common shift by a power of ten to a, b, c.
+func frexp10Many(a, b, c *extFloat) (exp10 int) {
+ exp10, i := c.frexp10()
+ a.Multiply(powersOfTen[i])
+ b.Multiply(powersOfTen[i])
+ return
+}
+
+// FixedDecimal stores in d the first n significant digits
+// of the decimal representation of f. It returns false
+// if it cannot be sure of the answer.
+func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool {
+ if f.mant == 0 {
+ d.nd = 0
+ d.dp = 0
+ d.neg = f.neg
+ return true
+ }
+ if n == 0 {
+ panic("strconv: internal error: extFloat.FixedDecimal called with n == 0")
+ }
+ // Multiply by an appropriate power of ten to have a reasonable
+ // number to process.
+ f.Normalize()
+ exp10, _ := f.frexp10()
+
+ shift := uint(-f.exp)
+ integer := uint32(f.mant >> shift)
+ fraction := f.mant - (uint64(integer) << shift)
+ ε := uint64(1) // ε is the uncertainty we have on the mantissa of f.
+
+ // Write exactly n digits to d.
+ needed := n // how many digits are left to write.
+ integerDigits := 0 // the number of decimal digits of integer.
+ pow10 := uint64(1) // the power of ten by which f was scaled.
+ for i, pow := 0, uint64(1); i < 20; i++ {
+ if pow > uint64(integer) {
+ integerDigits = i
+ break
+ }
+ pow *= 10
+ }
+ rest := integer
+ if integerDigits > needed {
+ // the integral part is already large, trim the last digits.
+ pow10 = uint64pow10[integerDigits-needed]
+ integer /= uint32(pow10)
+ rest -= integer * uint32(pow10)
+ } else {
+ rest = 0
+ }
+
+ // Write the digits of integer: the digits of rest are omitted.
+ var buf [32]byte
+ pos := len(buf)
+ for v := integer; v > 0; {
+ v1 := v / 10
+ v -= 10 * v1
+ pos--
+ buf[pos] = byte(v + '0')
+ v = v1
+ }
+ for i := pos; i < len(buf); i++ {
+ d.d[i-pos] = buf[i]
+ }
+ nd := len(buf) - pos
+ d.nd = nd
+ d.dp = integerDigits + exp10
+ needed -= nd
+
+ if needed > 0 {
+ if rest != 0 || pow10 != 1 {
+ panic("strconv: internal error, rest != 0 but needed > 0")
+ }
+ // Emit digits for the fractional part. Each time, 10*fraction
+ // fits in a uint64 without overflow.
+ for needed > 0 {
+ fraction *= 10
+ ε *= 10 // the uncertainty scales as we multiply by ten.
+ if 2*ε > 1<<shift {
+ // the error is so large it could modify which digit to write, abort.
+ return false
+ }
+ digit := fraction >> shift
+ d.d[nd] = byte(digit + '0')
+ fraction -= digit << shift
+ nd++
+ needed--
+ }
+ d.nd = nd
+ }
+
+ // We have written a truncation of f (a numerator / 10^d.dp). The remaining part
+ // can be interpreted as a small number (< 1) to be added to the last digit of the
+ // numerator.
+ //
+ // If rest > 0, the amount is:
+ // (rest<<shift | fraction) / (pow10 << shift)
+ // fraction being known with a ±ε uncertainty.
+ // The fact that n > 0 guarantees that pow10 << shift does not overflow a uint64.
+ //
+ // If rest = 0, pow10 == 1 and the amount is
+ // fraction / (1 << shift)
+ // fraction being known with a ±ε uncertainty.
+ //
+ // We pass this information to the rounding routine for adjustment.
+
+ ok := adjustLastDigitFixed(d, uint64(rest)<<shift|fraction, pow10, shift, ε)
+ if !ok {
+ return false
+ }
+ // Trim trailing zeros.
+ for i := d.nd - 1; i >= 0; i-- {
+ if d.d[i] != '0' {
+ d.nd = i + 1
+ break
+ }
+ }
+ return true
+}
+
+// adjustLastDigitFixed assumes d contains the representation of the integral part
+// of some number, whose fractional part is num / (den << shift). The numerator
+// num is only known up to an uncertainty of size ε, assumed to be less than
+// (den << shift)/2.
+//
+// It will increase the last digit by one to account for correct rounding, typically
+// when the fractional part is greater than 1/2, and will return false if ε is such
+// that no correct answer can be given.
+func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool {
+ if num > den<<shift {
+ panic("strconv: num > den<<shift in adjustLastDigitFixed")
+ }
+ if 2*ε > den<<shift {
+ panic("strconv: ε > (den<<shift)/2")
+ }
+ if 2*(num+ε) < den<<shift {
+ return true
+ }
+ if 2*(num-ε) > den<<shift {
+ // increment d by 1.
+ i := d.nd - 1
+ for ; i >= 0; i-- {
+ if d.d[i] == '9' {
+ d.nd--
+ } else {
+ break
+ }
+ }
+ if i < 0 {
+ d.d[0] = '1'
+ d.nd = 1
+ d.dp++
+ } else {
+ d.d[i]++
+ }
+ return true
+ }
+ return false
+}
+
+// ShortestDecimal stores in d the shortest decimal representation of f
+// which belongs to the open interval (lower, upper), where f is supposed
+// to lie. It returns false whenever the result is unsure. The implementation
+// uses the Grisu3 algorithm.
+func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool {
+ if f.mant == 0 {
+ d.nd = 0
+ d.dp = 0
+ d.neg = f.neg
+ return true
+ }
+ if f.exp == 0 && *lower == *f && *lower == *upper {
+ // an exact integer.
+ var buf [24]byte
+ n := len(buf) - 1
+ for v := f.mant; v > 0; {
+ v1 := v / 10
+ v -= 10 * v1
+ buf[n] = byte(v + '0')
+ n--
+ v = v1
+ }
+ nd := len(buf) - n - 1
+ for i := 0; i < nd; i++ {
+ d.d[i] = buf[n+1+i]
+ }
+ d.nd, d.dp = nd, nd
+ for d.nd > 0 && d.d[d.nd-1] == '0' {
+ d.nd--
+ }
+ if d.nd == 0 {
+ d.dp = 0
+ }
+ d.neg = f.neg
+ return true
+ }
+ upper.Normalize()
+ // Uniformize exponents.
+ if f.exp > upper.exp {
+ f.mant <<= uint(f.exp - upper.exp)
+ f.exp = upper.exp
+ }
+ if lower.exp > upper.exp {
+ lower.mant <<= uint(lower.exp - upper.exp)
+ lower.exp = upper.exp
+ }
+
+ exp10 := frexp10Many(lower, f, upper)
+ // Take a safety margin due to rounding in frexp10Many, but we lose precision.
+ upper.mant++
+ lower.mant--
+
+ // The shortest representation of f is either rounded up or down, but
+ // in any case, it is a truncation of upper.
+ shift := uint(-upper.exp)
+ integer := uint32(upper.mant >> shift)
+ fraction := upper.mant - (uint64(integer) << shift)
+
+ // How far we can go down from upper until the result is wrong.
+ allowance := upper.mant - lower.mant
+ // How far we should go to get a very precise result.
+ targetDiff := upper.mant - f.mant
+
+ // Count integral digits: there are at most 10.
+ var integerDigits int
+ for i, pow := 0, uint64(1); i < 20; i++ {
+ if pow > uint64(integer) {
+ integerDigits = i
+ break
+ }
+ pow *= 10
+ }
+ for i := 0; i < integerDigits; i++ {
+ pow := uint64pow10[integerDigits-i-1]
+ digit := integer / uint32(pow)
+ d.d[i] = byte(digit + '0')
+ integer -= digit * uint32(pow)
+ // evaluate whether we should stop.
+ if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
+ d.nd = i + 1
+ d.dp = integerDigits + exp10
+ d.neg = f.neg
+ // Sometimes allowance is so large the last digit might need to be
+ // decremented to get closer to f.
+ return adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2)
+ }
+ }
+ d.nd = integerDigits
+ d.dp = d.nd + exp10
+ d.neg = f.neg
+
+ // Compute digits of the fractional part. At each step fraction does not
+ // overflow. The choice of expMin implies that fraction is less than 2^60.
+ var digit int
+ multiplier := uint64(1)
+ for {
+ fraction *= 10
+ multiplier *= 10
+ digit = int(fraction >> shift)
+ d.d[d.nd] = byte(digit + '0')
+ d.nd++
+ fraction -= uint64(digit) << shift
+ if fraction < allowance*multiplier {
+ // We are in the admissible range. Note that if allowance is about to
+ // overflow, that is, allowance > 2^64/10, the condition is automatically
+ // true due to the limited range of fraction.
+ return adjustLastDigit(d,
+ fraction, targetDiff*multiplier, allowance*multiplier,
+ 1<<shift, multiplier*2)
+ }
+ }
+}
+
+// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
+// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
+// It assumes that a decimal digit is worth ulpDecimal*ε, and that
+// all data is known with an error estimate of ulpBinary*ε.
+func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
+ if ulpDecimal < 2*ulpBinary {
+ // Approximation is too wide.
+ return false
+ }
+ for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
+ d.d[d.nd-1]--
+ currentDiff += ulpDecimal
+ }
+ if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
+ // we have two choices, and don't know what to do.
+ return false
+ }
+ if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
+ // we went too far
+ return false
+ }
+ if d.nd == 1 && d.d[0] == '0' {
+ // the number has actually reached zero.
+ d.nd = 0
+ d.dp = 0
+ }
+ return true
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
new file mode 100644
index 000000000..253f83b45
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
@@ -0,0 +1,475 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary to decimal floating point conversion.
+// Algorithm:
+// 1) store mantissa in multiprecision decimal
+// 2) shift decimal by exponent
+// 3) read digits out & format
+
+package internal
+
+import "math"
+
+// TODO: move elsewhere?
+type floatInfo struct {
+ mantbits uint
+ expbits uint
+ bias int
+}
+
+var float32info = floatInfo{23, 8, -127}
+var float64info = floatInfo{52, 11, -1023}
+
+// formatFloat converts the floating-point number f to a string,
+// according to the format fmt and precision prec. It rounds the
+// result assuming that the original was obtained from a floating-point
+// value of bitSize bits (32 for float32, 64 for float64).
+//
+// The format fmt is one of
+// 'b' (-ddddp±ddd, a binary exponent),
+// 'e' (-d.dddde±dd, a decimal exponent),
+// 'E' (-d.ddddE±dd, a decimal exponent),
+// 'f' (-ddd.dddd, no exponent),
+// 'g' ('e' for large exponents, 'f' otherwise), or
+// 'G' ('E' for large exponents, 'f' otherwise).
+//
+// The precision prec controls the number of digits
+// (excluding the exponent) printed by the 'e', 'E', 'f', 'g', and 'G' formats.
+// For 'e', 'E', and 'f' it is the number of digits after the decimal point.
+// For 'g' and 'G' it is the total number of digits.
+// The special precision -1 uses the smallest number of digits
+// necessary such that ParseFloat will return f exactly.
+func formatFloat(f float64, fmt byte, prec, bitSize int) string {
+ return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize))
+}
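+
+// Illustrative usage of formatFloat (an editor's sketch, not part of the
+// upstream source; the semantics mirror strconv.FormatFloat):
+//
+//	formatFloat(3.14159, 'e', 3, 64) // "3.142e+00"
+//	formatFloat(0.1, 'f', -1, 64)    // "0.1" (shortest round-trip form)
+//	formatFloat(1e21, 'g', -1, 64)   // "1e+21"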
+
+// appendFloat appends the string form of the floating-point number f,
+// as generated by formatFloat, to dst and returns the extended buffer.
+func appendFloat(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte {
+ return genericFtoa(dst, f, fmt, prec, bitSize)
+}
+
+func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte {
+ var bits uint64
+ var flt *floatInfo
+ switch bitSize {
+ case 32:
+ bits = uint64(math.Float32bits(float32(val)))
+ flt = &float32info
+ case 64:
+ bits = math.Float64bits(val)
+ flt = &float64info
+ default:
+ panic("strconv: illegal AppendFloat/FormatFloat bitSize")
+ }
+
+ neg := bits>>(flt.expbits+flt.mantbits) != 0
+ exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
+ mant := bits & (uint64(1)<<flt.mantbits - 1)
+
+ switch exp {
+ case 1<<flt.expbits - 1:
+ // Inf, NaN
+ var s string
+ switch {
+ case mant != 0:
+ s = "NaN"
+ case neg:
+ s = "-Inf"
+ default:
+ s = "+Inf"
+ }
+ return append(dst, s...)
+
+ case 0:
+ // denormalized
+ exp++
+
+ default:
+ // add implicit top bit
+ mant |= uint64(1) << flt.mantbits
+ }
+ exp += flt.bias
+
+ // Pick off easy binary format.
+ if fmt == 'b' {
+ return fmtB(dst, neg, mant, exp, flt)
+ }
+
+ if !optimize {
+ return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ }
+
+ var digs decimalSlice
+ ok := false
+ // Negative precision means "only as much as needed to be exact."
+ shortest := prec < 0
+ if shortest {
+ // Try Grisu3 algorithm.
+ f := new(extFloat)
+ lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
+ var buf [32]byte
+ digs.d = buf[:]
+ ok = f.ShortestDecimal(&digs, &lower, &upper)
+ if !ok {
+ return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ }
+ // Precision for shortest representation mode.
+ switch fmt {
+ case 'e', 'E':
+ prec = digs.nd - 1
+ case 'f':
+ prec = max(digs.nd-digs.dp, 0)
+ case 'g', 'G':
+ prec = digs.nd
+ }
+ } else if fmt != 'f' {
+ // Fixed number of digits.
+ digits := prec
+ switch fmt {
+ case 'e', 'E':
+ digits++
+ case 'g', 'G':
+ if prec == 0 {
+ prec = 1
+ }
+ digits = prec
+ }
+ if digits <= 15 {
+ // try fast algorithm when the number of digits is reasonable.
+ var buf [24]byte
+ digs.d = buf[:]
+ f := extFloat{mant, exp - int(flt.mantbits), neg}
+ ok = f.FixedDecimal(&digs, digits)
+ }
+ }
+ if !ok {
+ return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ }
+ return formatDigits(dst, shortest, neg, digs, prec, fmt)
+}
+
+// bigFtoa uses multiprecision computations to format a float.
+func bigFtoa(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
+ d := new(decimal)
+ d.Assign(mant)
+ d.Shift(exp - int(flt.mantbits))
+ var digs decimalSlice
+ shortest := prec < 0
+ if shortest {
+ roundShortest(d, mant, exp, flt)
+ digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+ // Precision for shortest representation mode.
+ switch fmt {
+ case 'e', 'E':
+ prec = digs.nd - 1
+ case 'f':
+ prec = max(digs.nd-digs.dp, 0)
+ case 'g', 'G':
+ prec = digs.nd
+ }
+ } else {
+ // Round appropriately.
+ switch fmt {
+ case 'e', 'E':
+ d.Round(prec + 1)
+ case 'f':
+ d.Round(d.dp + prec)
+ case 'g', 'G':
+ if prec == 0 {
+ prec = 1
+ }
+ d.Round(prec)
+ }
+ digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+ }
+ return formatDigits(dst, shortest, neg, digs, prec, fmt)
+}
+
+func formatDigits(dst []byte, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) []byte {
+ switch fmt {
+ case 'e', 'E':
+ return fmtE(dst, neg, digs, prec, fmt)
+ case 'f':
+ return fmtF(dst, neg, digs, prec)
+ case 'g', 'G':
+ // trailing fractional zeros in 'e' form will be trimmed.
+ eprec := prec
+ if eprec > digs.nd && digs.nd >= digs.dp {
+ eprec = digs.nd
+ }
+ // %e is used if the exponent from the conversion
+ // is less than -4 or greater than or equal to the precision.
+ // if precision was the shortest possible, use precision 6 for this decision.
+ if shortest {
+ eprec = 6
+ }
+ exp := digs.dp - 1
+ if exp < -4 || exp >= eprec {
+ if prec > digs.nd {
+ prec = digs.nd
+ }
+ return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
+ }
+ if prec > digs.dp {
+ prec = digs.nd
+ }
+ return fmtF(dst, neg, digs, max(prec-digs.dp, 0))
+ }
+
+ // unknown format
+ return append(dst, '%', fmt)
+}
+
+// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits
+// that will let the original floating point value be precisely
+// reconstructed.
+func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
+ // If mantissa is zero, the number is zero; stop now.
+ if mant == 0 {
+ d.nd = 0
+ return
+ }
+
+ // Compute upper and lower such that any decimal number
+ // between upper and lower (possibly inclusive)
+ // will round to the original floating point number.
+
+ // We may see at once that the number is already shortest.
+ //
+ // Suppose d is not denormal, so that 2^exp <= d < 10^dp.
+ // The closest shorter number is at least 10^(dp-nd) away.
+ // The lower/upper bounds computed below are at distance
+ // at most 2^(exp-mantbits).
+ //
+ // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
+ // or equivalently log2(10)*(dp-nd) > exp-mantbits.
+ // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
+ minexp := flt.bias + 1 // minimum possible exponent
+ if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
+ // The number is already shortest.
+ return
+ }
+
+ // d = mant << (exp - mantbits)
+ // Next highest floating point number is mant+1 << exp-mantbits.
+ // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
+ upper := new(decimal)
+ upper.Assign(mant*2 + 1)
+ upper.Shift(exp - int(flt.mantbits) - 1)
+
+ // d = mant << (exp - mantbits)
+ // Next lowest floating point number is mant-1 << exp-mantbits,
+ // unless mant-1 drops the significant bit and exp is not the minimum exp,
+ // in which case the next lowest is mant*2-1 << exp-mantbits-1.
+ // Either way, call it mantlo << explo-mantbits.
+ // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+ var mantlo uint64
+ var explo int
+ if mant > 1<<flt.mantbits || exp == minexp {
+ mantlo = mant - 1
+ explo = exp
+ } else {
+ mantlo = mant*2 - 1
+ explo = exp - 1
+ }
+ lower := new(decimal)
+ lower.Assign(mantlo*2 + 1)
+ lower.Shift(explo - int(flt.mantbits) - 1)
+
+ // The upper and lower bounds are possible outputs only if
+ // the original mantissa is even, so that IEEE round-to-even
+ // would round to the original mantissa and not the neighbors.
+ inclusive := mant%2 == 0
+
+ // Now we can figure out the minimum number of digits required.
+ // Walk along until d has distinguished itself from upper and lower.
+ for i := 0; i < d.nd; i++ {
+ var l, m, u byte // lower, middle, upper digits
+ if i < lower.nd {
+ l = lower.d[i]
+ } else {
+ l = '0'
+ }
+ m = d.d[i]
+ if i < upper.nd {
+ u = upper.d[i]
+ } else {
+ u = '0'
+ }
+
+ // Okay to round down (truncate) if lower has a different digit
+ // or if lower is inclusive and is exactly the result of rounding down.
+ okdown := l != m || (inclusive && l == m && i+1 == lower.nd)
+
+ // Okay to round up if upper has a different digit and
+ // either upper is inclusive or upper is bigger than the result of rounding up.
+ okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
+
+ // If it's okay to do either, then round to the nearest one.
+ // If it's okay to do only one, do it.
+ switch {
+ case okdown && okup:
+ d.Round(i + 1)
+ return
+ case okdown:
+ d.RoundDown(i + 1)
+ return
+ case okup:
+ d.RoundUp(i + 1)
+ return
+ }
+ }
+}
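+
+// Worked example (editor's sketch): the float64 nearest to 0.1 is
+// 0.1000000000000000055511151231257827021181583404541015625. Every decimal
+// strictly between lower and upper rounds back to the same bits, and "0.1"
+// is the shortest such decimal, so roundShortest trims d to the single
+// digit '1' with dp = 0.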
+
+type decimalSlice struct {
+ d []byte
+ nd, dp int
+ neg bool
+}
+
+// %e: -d.ddddde±dd
+func fmtE(dst []byte, neg bool, d decimalSlice, prec int, fmt byte) []byte {
+ // sign
+ if neg {
+ dst = append(dst, '-')
+ }
+
+ // first digit
+ ch := byte('0')
+ if d.nd != 0 {
+ ch = d.d[0]
+ }
+ dst = append(dst, ch)
+
+ // .moredigits
+ if prec > 0 {
+ dst = append(dst, '.')
+ i := 1
+ m := d.nd + prec + 1 - max(d.nd, prec+1)
+ for i < m {
+ dst = append(dst, d.d[i])
+ i++
+ }
+ for i <= prec {
+ dst = append(dst, '0')
+ i++
+ }
+ }
+
+ // e±
+ dst = append(dst, fmt)
+ exp := d.dp - 1
+ if d.nd == 0 { // special case: 0 has exponent 0
+ exp = 0
+ }
+ if exp < 0 {
+ ch = '-'
+ exp = -exp
+ } else {
+ ch = '+'
+ }
+ dst = append(dst, ch)
+
+ // dddd
+ var buf [3]byte
+ i := len(buf)
+ for exp >= 10 {
+ i--
+ buf[i] = byte(exp%10 + '0')
+ exp /= 10
+ }
+ // exp < 10
+ i--
+ buf[i] = byte(exp + '0')
+
+ switch i {
+ case 0:
+ dst = append(dst, buf[0], buf[1], buf[2])
+ case 1:
+ dst = append(dst, buf[1], buf[2])
+ case 2:
+ // leading zeroes
+ dst = append(dst, '0', buf[2])
+ }
+ return dst
+}
+
+// %f: -ddddddd.ddddd
+func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte {
+ // sign
+ if neg {
+ dst = append(dst, '-')
+ }
+
+ // integer, padded with zeros as needed.
+ if d.dp > 0 {
+ var i int
+ for i = 0; i < d.dp && i < d.nd; i++ {
+ dst = append(dst, d.d[i])
+ }
+ for ; i < d.dp; i++ {
+ dst = append(dst, '0')
+ }
+ } else {
+ dst = append(dst, '0')
+ }
+
+ // fraction
+ if prec > 0 {
+ dst = append(dst, '.')
+ for i := 0; i < prec; i++ {
+ ch := byte('0')
+ if j := d.dp + i; 0 <= j && j < d.nd {
+ ch = d.d[j]
+ }
+ dst = append(dst, ch)
+ }
+ }
+
+ return dst
+}
+
+// %b: -ddddddddp+ddd
+func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
+ var buf [50]byte
+ w := len(buf)
+ exp -= int(flt.mantbits)
+ esign := byte('+')
+ if exp < 0 {
+ esign = '-'
+ exp = -exp
+ }
+ n := 0
+ for exp > 0 || n < 1 {
+ n++
+ w--
+ buf[w] = byte(exp%10 + '0')
+ exp /= 10
+ }
+ w--
+ buf[w] = esign
+ w--
+ buf[w] = 'p'
+ n = 0
+ for mant > 0 || n < 1 {
+ n++
+ w--
+ buf[w] = byte(mant%10 + '0')
+ mant /= 10
+ }
+ if neg {
+ w--
+ buf[w] = '-'
+ }
+ return append(dst, buf[w:]...)
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go b/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go
new file mode 100644
index 000000000..3e50f0c41
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go
@@ -0,0 +1,161 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are based on Go stdlib's strconv/itoa.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+import (
+ "io"
+)
+
+const (
+ digits = "0123456789abcdefghijklmnopqrstuvwxyz"
+ digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+ digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+)
+
+var shifts = [len(digits) + 1]uint{
+ 1 << 1: 1,
+ 1 << 2: 2,
+ 1 << 3: 3,
+ 1 << 4: 4,
+ 1 << 5: 5,
+}
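+
+// shifts[base] is log2(base) for the power-of-two bases 2, 4, 8, 16 and 32,
+// and 0 otherwise; FormatBits2 uses it to replace / and % with shifts and
+// masks for those bases.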
+
+var smallNumbers = [][]byte{
+ []byte("0"),
+ []byte("1"),
+ []byte("2"),
+ []byte("3"),
+ []byte("4"),
+ []byte("5"),
+ []byte("6"),
+ []byte("7"),
+ []byte("8"),
+ []byte("9"),
+ []byte("10"),
+}
+
+type FormatBitsWriter interface {
+ io.Writer
+ io.ByteWriter
+}
+
+type FormatBitsScratch struct{}
+
+// FormatBits writes the string representation of u in the given base to dst.
+// If neg is set, u is treated as a negative int64 value.
+//
+// Deprecated: scratch is no longer used; use FormatBits2 instead.
+func FormatBits(scratch *FormatBitsScratch, dst FormatBitsWriter, u uint64, base int, neg bool) {
+ FormatBits2(dst, u, base, neg)
+}
+
+// FormatBits2 writes the string representation of u in the given base to
+// dst. If neg is set, u is treated as a negative int64 value.
+func FormatBits2(dst FormatBitsWriter, u uint64, base int, neg bool) {
+ if base < 2 || base > len(digits) {
+ panic("strconv: illegal AppendInt/FormatInt base")
+ }
+ // fast path for small common numbers
+ if u <= 10 {
+ if neg {
+ dst.WriteByte('-')
+ }
+ dst.Write(smallNumbers[u])
+ return
+ }
+
+ // 2 <= base && base <= len(digits)
+
+ var a = makeSlice(65)
+ // var a [64 + 1]byte // +1 for sign of 64bit value in base 2
+ i := len(a)
+
+ if neg {
+ u = -u
+ }
+
+ // convert bits
+ if base == 10 {
+ // common case: use constants for / and % because
+ // the compiler can optimize it into a multiply+shift,
+ // and unroll loop
+ for u >= 100 {
+ i -= 2
+ q := u / 100
+ j := uintptr(u - q*100)
+ a[i+1] = digits01[j]
+ a[i+0] = digits10[j]
+ u = q
+ }
+ if u >= 10 {
+ i--
+ q := u / 10
+ a[i] = digits[uintptr(u-q*10)]
+ u = q
+ }
+
+ } else if s := shifts[base]; s > 0 {
+ // base is power of 2: use shifts and masks instead of / and %
+ b := uint64(base)
+ m := uintptr(b) - 1 // == 1<<s - 1
+ for u >= b {
+ i--
+ a[i] = digits[uintptr(u)&m]
+ u >>= s
+ }
+
+ } else {
+ // general case
+ b := uint64(base)
+ for u >= b {
+ i--
+ a[i] = digits[uintptr(u%b)]
+ u /= b
+ }
+ }
+
+ // u < base
+ i--
+ a[i] = digits[uintptr(u)]
+
+ // add sign, if any
+ if neg {
+ i--
+ a[i] = '-'
+ }
+
+ dst.Write(a[i:])
+
+ Pool(a)
+
+ return
+}
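+
+// Illustrative usage (an editor's sketch, not part of the upstream source;
+// bytes.Buffer satisfies FormatBitsWriter, since it implements both
+// io.Writer and io.ByteWriter):
+//
+//	var buf bytes.Buffer
+//	i := int64(-42)
+//	FormatBits2(&buf, uint64(i), 10, i < 0) // buf: "-42"
+//	buf.Reset()
+//	FormatBits2(&buf, 255, 16, false)       // buf: "ff"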
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go b/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go
new file mode 100644
index 000000000..513b45d57
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go
@@ -0,0 +1,512 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are based on Go stdlib's encoding/json/encode.go */
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+import (
+ "io"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+const hex = "0123456789abcdef"
+
+type JsonStringWriter interface {
+ io.Writer
+ io.ByteWriter
+ stringWriter
+}
+
+func WriteJsonString(buf JsonStringWriter, s string) {
+ WriteJson(buf, []byte(s))
+}
+
+/**
+ * Function ported from encoding/json: func (e *encodeState) string(s string) (int, error)
+ */
+func WriteJson(buf JsonStringWriter, s []byte) {
+ buf.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ /*
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ */
+ if lt[b] {
+ i++
+ continue
+ }
+
+ if start < i {
+ buf.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ buf.WriteByte('\\')
+ buf.WriteByte(b)
+ case '\n':
+ buf.WriteByte('\\')
+ buf.WriteByte('n')
+ case '\r':
+ buf.WriteByte('\\')
+ buf.WriteByte('r')
+ default:
+ // This encodes bytes < 0x20 except for \n and \r,
+ // as well as <, > and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ buf.WriteString(`\u00`)
+ buf.WriteByte(hex[b>>4])
+ buf.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ buf.Write(s[start:i])
+ }
+ buf.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ buf.Write(s[start:i])
+ }
+ buf.WriteString(`\u202`)
+ buf.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ buf.Write(s[start:])
+ }
+ buf.WriteByte('"')
+}
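+
+// Illustrative usage (an editor's sketch, not part of the upstream source;
+// bytes.Buffer satisfies JsonStringWriter):
+//
+//	var buf bytes.Buffer
+//	WriteJsonString(&buf, "he said \"hi\"\n<script>")
+//	// buf: "he said \"hi\"\n\u003cscript\u003e"
+//	// quotes and \n are backslash-escaped; < and > are \u-escaped so the
+//	// output is safe to embed in HTML.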
+
+// UnquoteBytes decodes a []byte containing a JSON-quoted string into its
+// unquoted form; ported from encoding/json/decode.go.
+func UnquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
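+
+// Illustrative usage (an editor's sketch, not part of the upstream source):
+//
+//	t, ok := UnquoteBytes([]byte(`"tab:\t snowman:\u2603"`))
+//	// ok == true, string(t) == "tab:\t snowman:☃"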
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or -1 on failure.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// TODO(pquerna): consider combining with the normal byte mask.
+var lt [256]bool = [256]bool{
+ false, /* 0 */
+ false, /* 1 */
+ false, /* 2 */
+ false, /* 3 */
+ false, /* 4 */
+ false, /* 5 */
+ false, /* 6 */
+ false, /* 7 */
+ false, /* 8 */
+ false, /* 9 */
+ false, /* 10 */
+ false, /* 11 */
+ false, /* 12 */
+ false, /* 13 */
+ false, /* 14 */
+ false, /* 15 */
+ false, /* 16 */
+ false, /* 17 */
+ false, /* 18 */
+ false, /* 19 */
+ false, /* 20 */
+ false, /* 21 */
+ false, /* 22 */
+ false, /* 23 */
+ false, /* 24 */
+ false, /* 25 */
+ false, /* 26 */
+ false, /* 27 */
+ false, /* 28 */
+ false, /* 29 */
+ false, /* 30 */
+ false, /* 31 */
+ true, /* 32 */
+ true, /* 33 */
+ false, /* 34 */
+ true, /* 35 */
+ true, /* 36 */
+ true, /* 37 */
+ false, /* 38 */
+ true, /* 39 */
+ true, /* 40 */
+ true, /* 41 */
+ true, /* 42 */
+ true, /* 43 */
+ true, /* 44 */
+ true, /* 45 */
+ true, /* 46 */
+ true, /* 47 */
+ true, /* 48 */
+ true, /* 49 */
+ true, /* 50 */
+ true, /* 51 */
+ true, /* 52 */
+ true, /* 53 */
+ true, /* 54 */
+ true, /* 55 */
+ true, /* 56 */
+ true, /* 57 */
+ true, /* 58 */
+ true, /* 59 */
+ false, /* 60 */
+ true, /* 61 */
+ false, /* 62 */
+ true, /* 63 */
+ true, /* 64 */
+ true, /* 65 */
+ true, /* 66 */
+ true, /* 67 */
+ true, /* 68 */
+ true, /* 69 */
+ true, /* 70 */
+ true, /* 71 */
+ true, /* 72 */
+ true, /* 73 */
+ true, /* 74 */
+ true, /* 75 */
+ true, /* 76 */
+ true, /* 77 */
+ true, /* 78 */
+ true, /* 79 */
+ true, /* 80 */
+ true, /* 81 */
+ true, /* 82 */
+ true, /* 83 */
+ true, /* 84 */
+ true, /* 85 */
+ true, /* 86 */
+ true, /* 87 */
+ true, /* 88 */
+ true, /* 89 */
+ true, /* 90 */
+ true, /* 91 */
+ false, /* 92 */
+ true, /* 93 */
+ true, /* 94 */
+ true, /* 95 */
+ true, /* 96 */
+ true, /* 97 */
+ true, /* 98 */
+ true, /* 99 */
+ true, /* 100 */
+ true, /* 101 */
+ true, /* 102 */
+ true, /* 103 */
+ true, /* 104 */
+ true, /* 105 */
+ true, /* 106 */
+ true, /* 107 */
+ true, /* 108 */
+ true, /* 109 */
+ true, /* 110 */
+ true, /* 111 */
+ true, /* 112 */
+ true, /* 113 */
+ true, /* 114 */
+ true, /* 115 */
+ true, /* 116 */
+ true, /* 117 */
+ true, /* 118 */
+ true, /* 119 */
+ true, /* 120 */
+ true, /* 121 */
+ true, /* 122 */
+ true, /* 123 */
+ true, /* 124 */
+ true, /* 125 */
+ true, /* 126 */
+ true, /* 127 */
+ true, /* 128 */
+ true, /* 129 */
+ true, /* 130 */
+ true, /* 131 */
+ true, /* 132 */
+ true, /* 133 */
+ true, /* 134 */
+ true, /* 135 */
+ true, /* 136 */
+ true, /* 137 */
+ true, /* 138 */
+ true, /* 139 */
+ true, /* 140 */
+ true, /* 141 */
+ true, /* 142 */
+ true, /* 143 */
+ true, /* 144 */
+ true, /* 145 */
+ true, /* 146 */
+ true, /* 147 */
+ true, /* 148 */
+ true, /* 149 */
+ true, /* 150 */
+ true, /* 151 */
+ true, /* 152 */
+ true, /* 153 */
+ true, /* 154 */
+ true, /* 155 */
+ true, /* 156 */
+ true, /* 157 */
+ true, /* 158 */
+ true, /* 159 */
+ true, /* 160 */
+ true, /* 161 */
+ true, /* 162 */
+ true, /* 163 */
+ true, /* 164 */
+ true, /* 165 */
+ true, /* 166 */
+ true, /* 167 */
+ true, /* 168 */
+ true, /* 169 */
+ true, /* 170 */
+ true, /* 171 */
+ true, /* 172 */
+ true, /* 173 */
+ true, /* 174 */
+ true, /* 175 */
+ true, /* 176 */
+ true, /* 177 */
+ true, /* 178 */
+ true, /* 179 */
+ true, /* 180 */
+ true, /* 181 */
+ true, /* 182 */
+ true, /* 183 */
+ true, /* 184 */
+ true, /* 185 */
+ true, /* 186 */
+ true, /* 187 */
+ true, /* 188 */
+ true, /* 189 */
+ true, /* 190 */
+ true, /* 191 */
+ true, /* 192 */
+ true, /* 193 */
+ true, /* 194 */
+ true, /* 195 */
+ true, /* 196 */
+ true, /* 197 */
+ true, /* 198 */
+ true, /* 199 */
+ true, /* 200 */
+ true, /* 201 */
+ true, /* 202 */
+ true, /* 203 */
+ true, /* 204 */
+ true, /* 205 */
+ true, /* 206 */
+ true, /* 207 */
+ true, /* 208 */
+ true, /* 209 */
+ true, /* 210 */
+ true, /* 211 */
+ true, /* 212 */
+ true, /* 213 */
+ true, /* 214 */
+ true, /* 215 */
+ true, /* 216 */
+ true, /* 217 */
+ true, /* 218 */
+ true, /* 219 */
+ true, /* 220 */
+ true, /* 221 */
+ true, /* 222 */
+ true, /* 223 */
+ true, /* 224 */
+ true, /* 225 */
+ true, /* 226 */
+ true, /* 227 */
+ true, /* 228 */
+ true, /* 229 */
+ true, /* 230 */
+ true, /* 231 */
+ true, /* 232 */
+ true, /* 233 */
+ true, /* 234 */
+ true, /* 235 */
+ true, /* 236 */
+ true, /* 237 */
+ true, /* 238 */
+ true, /* 239 */
+ true, /* 240 */
+ true, /* 241 */
+ true, /* 242 */
+ true, /* 243 */
+ true, /* 244 */
+ true, /* 245 */
+ true, /* 246 */
+ true, /* 247 */
+ true, /* 248 */
+ true, /* 249 */
+ true, /* 250 */
+ true, /* 251 */
+ true, /* 252 */
+ true, /* 253 */
+ true, /* 254 */
+ true, /* 255 */
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go b/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go
new file mode 100644
index 000000000..8ffd54be5
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go
@@ -0,0 +1,937 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are derived from yajl: <https://github.com/lloyd/yajl> */
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me@lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package v1
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+type FFParseState int
+
+const (
+ FFParse_map_start FFParseState = iota
+ FFParse_want_key
+ FFParse_want_colon
+ FFParse_want_value
+ FFParse_after_value
+)
+
+type FFTok int
+
+const (
+ FFTok_init FFTok = iota
+ FFTok_bool FFTok = iota
+ FFTok_colon FFTok = iota
+ FFTok_comma FFTok = iota
+ FFTok_eof FFTok = iota
+ FFTok_error FFTok = iota
+ FFTok_left_brace FFTok = iota
+ FFTok_left_bracket FFTok = iota
+ FFTok_null FFTok = iota
+ FFTok_right_brace FFTok = iota
+ FFTok_right_bracket FFTok = iota
+
+ /* we differentiate between integers and doubles to allow the
+ * parser to interpret the number without re-scanning */
+ FFTok_integer FFTok = iota
+ FFTok_double FFTok = iota
+
+ FFTok_string FFTok = iota
+
+ /* comment tokens are not currently returned to the parser, ever */
+ FFTok_comment FFTok = iota
+)
+
+type FFErr int
+
+const (
+ FFErr_e_ok FFErr = iota
+ FFErr_io FFErr = iota
+ FFErr_string_invalid_utf8 FFErr = iota
+ FFErr_string_invalid_escaped_char FFErr = iota
+ FFErr_string_invalid_json_char FFErr = iota
+ FFErr_string_invalid_hex_char FFErr = iota
+ FFErr_invalid_char FFErr = iota
+ FFErr_invalid_string FFErr = iota
+ FFErr_missing_integer_after_decimal FFErr = iota
+ FFErr_missing_integer_after_exponent FFErr = iota
+ FFErr_missing_integer_after_minus FFErr = iota
+ FFErr_unallowed_comment FFErr = iota
+ FFErr_incomplete_comment FFErr = iota
+ FFErr_unexpected_token_type FFErr = iota // TODO: improve this error
+)
+
+type FFLexer struct {
+ reader *ffReader
+ Output DecodingBuffer
+ Token FFTok
+ Error FFErr
+ BigError error
+ // TODO: convert all of this to an interface
+ lastCurrentChar int
+ captureAll bool
+ buf Buffer
+}
+
+func NewFFLexer(input []byte) *FFLexer {
+ fl := &FFLexer{
+ Token: FFTok_init,
+ Error: FFErr_e_ok,
+ reader: newffReader(input),
+ Output: &Buffer{},
+ }
+ // TODO: guess size?
+ //fl.Output.Grow(64)
+ return fl
+}
+
+type LexerError struct {
+ offset int
+ line int
+ char int
+ err error
+}
+
+// Reset resets the lexer and sets new input.
+func (ffl *FFLexer) Reset(input []byte) {
+ ffl.Token = FFTok_init
+ ffl.Error = FFErr_e_ok
+ ffl.BigError = nil
+ ffl.reader.Reset(input)
+ ffl.lastCurrentChar = 0
+ ffl.Output.Reset()
+}
+
+func (le *LexerError) Error() string {
+ return fmt.Sprintf(`ffjson error: (%T)%s offset=%d line=%d char=%d`,
+ le.err, le.err.Error(),
+ le.offset, le.line, le.char)
+}
+
+func (ffl *FFLexer) WrapErr(err error) error {
+ line, char := ffl.reader.PosWithLine()
+ // TODO: calculate lines/characters based on offset
+ return &LexerError{
+ offset: ffl.reader.Pos(),
+ line: line,
+ char: char,
+ err: err,
+ }
+}
+
+func (ffl *FFLexer) scanReadByte() (byte, error) {
+ var c byte
+ var err error
+ if ffl.captureAll {
+ c, err = ffl.reader.ReadByte()
+ } else {
+ c, err = ffl.reader.ReadByteNoWS()
+ }
+
+ if err != nil {
+ ffl.Error = FFErr_io
+ ffl.BigError = err
+ return 0, err
+ }
+
+ return c, nil
+}
+
+func (ffl *FFLexer) readByte() (byte, error) {
+
+ c, err := ffl.reader.ReadByte()
+ if err != nil {
+ ffl.Error = FFErr_io
+ ffl.BigError = err
+ return 0, err
+ }
+
+ return c, nil
+}
+
+func (ffl *FFLexer) unreadByte() {
+ ffl.reader.UnreadByte()
+}
+
+func (ffl *FFLexer) wantBytes(want []byte, iftrue FFTok) FFTok {
+ startPos := ffl.reader.Pos()
+ for _, b := range want {
+ c, err := ffl.readByte()
+
+ if err != nil {
+ return FFTok_error
+ }
+
+ if c != b {
+ ffl.unreadByte()
+ // fmt.Printf("wanted bytes: %s\n", string(want))
+ // TODO(pquerna): this is a bad error message
+ ffl.Error = FFErr_invalid_string
+ return FFTok_error
+ }
+ }
+
+ endPos := ffl.reader.Pos()
+ ffl.Output.Write(ffl.reader.Slice(startPos, endPos))
+ return iftrue
+}
+
+func (ffl *FFLexer) lexComment() FFTok {
+ c, err := ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+
+ if c == '/' {
+ // a // comment, scan until line ends.
+ for {
+ c, err := ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+
+ if c == '\n' {
+ return FFTok_comment
+ }
+ }
+ } else if c == '*' {
+ // a /* */ comment, scan */
+ for {
+ c, err := ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+
+ if c == '*' {
+ c, err := ffl.readByte()
+
+ if err != nil {
+ return FFTok_error
+ }
+
+ if c == '/' {
+ return FFTok_comment
+ }
+
+ ffl.Error = FFErr_incomplete_comment
+ return FFTok_error
+ }
+ }
+ } else {
+ ffl.Error = FFErr_incomplete_comment
+ return FFTok_error
+ }
+}
+
+func (ffl *FFLexer) lexString() FFTok {
+ if ffl.captureAll {
+ ffl.buf.Reset()
+ err := ffl.reader.SliceString(&ffl.buf)
+
+ if err != nil {
+ ffl.BigError = err
+ return FFTok_error
+ }
+
+ WriteJson(ffl.Output, ffl.buf.Bytes())
+
+ return FFTok_string
+ } else {
+ err := ffl.reader.SliceString(ffl.Output)
+
+ if err != nil {
+ ffl.BigError = err
+ return FFTok_error
+ }
+
+ return FFTok_string
+ }
+}
+
+func (ffl *FFLexer) lexNumber() FFTok {
+ var numRead int = 0
+ tok := FFTok_integer
+ startPos := ffl.reader.Pos()
+
+ c, err := ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+
+ /* optional leading minus */
+ if c == '-' {
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+ }
+
+ /* a single zero, or a series of integers */
+ if c == '0' {
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+ } else if c >= '1' && c <= '9' {
+ for c >= '0' && c <= '9' {
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+ }
+ } else {
+ ffl.unreadByte()
+ ffl.Error = FFErr_missing_integer_after_minus
+ return FFTok_error
+ }
+
+ if c == '.' {
+ numRead = 0
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+
+ for c >= '0' && c <= '9' {
+ numRead++
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+ }
+
+ if numRead == 0 {
+ ffl.unreadByte()
+
+ ffl.Error = FFErr_missing_integer_after_decimal
+ return FFTok_error
+ }
+
+ tok = FFTok_double
+ }
+
+ /* optional exponent (indicates this is floating point) */
+ if c == 'e' || c == 'E' {
+ numRead = 0
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+
+ /* optional sign */
+ if c == '+' || c == '-' {
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+ }
+
+ for c >= '0' && c <= '9' {
+ numRead++
+ c, err = ffl.readByte()
+ if err != nil {
+ return FFTok_error
+ }
+ }
+
+ if numRead == 0 {
+ ffl.Error = FFErr_missing_integer_after_exponent
+ return FFTok_error
+ }
+
+ tok = FFTok_double
+ }
+
+ ffl.unreadByte()
+
+ endPos := ffl.reader.Pos()
+ ffl.Output.Write(ffl.reader.Slice(startPos, endPos))
+ return tok
+}
+
+var true_bytes = []byte{'r', 'u', 'e'}
+var false_bytes = []byte{'a', 'l', 's', 'e'}
+var null_bytes = []byte{'u', 'l', 'l'}
+
+func (ffl *FFLexer) Scan() FFTok {
+ tok := FFTok_error
+ if !ffl.captureAll {
+ ffl.Output.Reset()
+ }
+ ffl.Token = FFTok_init
+
+ for {
+ c, err := ffl.scanReadByte()
+ if err != nil {
+ if err == io.EOF {
+ return FFTok_eof
+ } else {
+ return FFTok_error
+ }
+ }
+
+ switch c {
+ case '{':
+ tok = FFTok_left_bracket
+ if ffl.captureAll {
+ ffl.Output.WriteByte('{')
+ }
+ goto lexed
+ case '}':
+ tok = FFTok_right_bracket
+ if ffl.captureAll {
+ ffl.Output.WriteByte('}')
+ }
+ goto lexed
+ case '[':
+ tok = FFTok_left_brace
+ if ffl.captureAll {
+ ffl.Output.WriteByte('[')
+ }
+ goto lexed
+ case ']':
+ tok = FFTok_right_brace
+ if ffl.captureAll {
+ ffl.Output.WriteByte(']')
+ }
+ goto lexed
+ case ',':
+ tok = FFTok_comma
+ if ffl.captureAll {
+ ffl.Output.WriteByte(',')
+ }
+ goto lexed
+ case ':':
+ tok = FFTok_colon
+ if ffl.captureAll {
+ ffl.Output.WriteByte(':')
+ }
+ goto lexed
+ case '\t', '\n', '\v', '\f', '\r', ' ':
+ if ffl.captureAll {
+ ffl.Output.WriteByte(c)
+ }
+ break
+ case 't':
+ ffl.Output.WriteByte('t')
+ tok = ffl.wantBytes(true_bytes, FFTok_bool)
+ goto lexed
+ case 'f':
+ ffl.Output.WriteByte('f')
+ tok = ffl.wantBytes(false_bytes, FFTok_bool)
+ goto lexed
+ case 'n':
+ ffl.Output.WriteByte('n')
+ tok = ffl.wantBytes(null_bytes, FFTok_null)
+ goto lexed
+ case '"':
+ tok = ffl.lexString()
+ goto lexed
+ case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ ffl.unreadByte()
+ tok = ffl.lexNumber()
+ goto lexed
+ case '/':
+ tok = ffl.lexComment()
+ goto lexed
+ default:
+ tok = FFTok_error
+ ffl.Error = FFErr_invalid_char
+ }
+ }
+
+lexed:
+ ffl.Token = tok
+ return tok
+}
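+
+// Illustrative usage (an editor's sketch, not part of the upstream source):
+//
+//	lexer := NewFFLexer([]byte(`{"a": 1}`))
+//	for {
+//		tok := lexer.Scan()
+//		if tok == FFTok_eof || tok == FFTok_error {
+//			break
+//		}
+//		fmt.Printf("%s %q\n", tok, lexer.Output.Bytes())
+//	}
+//	// tok:left_bracket "", tok:string "a", tok:colon "",
+//	// tok:integer "1", tok:right_bracket ""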
+
+func (ffl *FFLexer) scanField(start FFTok, capture bool) ([]byte, error) {
+ switch start {
+ case FFTok_left_brace,
+ FFTok_left_bracket:
+ {
+ end := FFTok_right_brace
+ if start == FFTok_left_bracket {
+ end = FFTok_right_bracket
+ if capture {
+ ffl.Output.WriteByte('{')
+ }
+ } else {
+ if capture {
+ ffl.Output.WriteByte('[')
+ }
+ }
+
+ depth := 1
+ if capture {
+ ffl.captureAll = true
+ }
+ // TODO: work.
+ scanloop:
+ for {
+ tok := ffl.Scan()
+ //fmt.Printf("capture-token: %v end: %v depth: %v\n", tok, end, depth)
+ switch tok {
+ case FFTok_eof:
+ return nil, errors.New("ffjson: unexpected EOF")
+ case FFTok_error:
+ if ffl.BigError != nil {
+ return nil, ffl.BigError
+ }
+ return nil, ffl.Error.ToError()
+ case end:
+ depth--
+ if depth == 0 {
+ break scanloop
+ }
+ case start:
+ depth++
+ }
+ }
+
+ if capture {
+ ffl.captureAll = false
+ }
+
+ if capture {
+ return ffl.Output.Bytes(), nil
+ } else {
+ return nil, nil
+ }
+ }
+ case FFTok_bool,
+ FFTok_integer,
+ FFTok_null,
+ FFTok_double:
+ // simple value, return it.
+ if capture {
+ return ffl.Output.Bytes(), nil
+ } else {
+ return nil, nil
+ }
+
+ case FFTok_string:
+ //TODO(pquerna): so, other users expect this to be a quoted string :(
+ if capture {
+ ffl.buf.Reset()
+ WriteJson(&ffl.buf, ffl.Output.Bytes())
+ return ffl.buf.Bytes(), nil
+ } else {
+ return nil, nil
+ }
+ }
+
+ return nil, fmt.Errorf("ffjson: invalid capture type: %v", start)
+}
+
+// CaptureField captures an entire field value, including nested objects,
+// and returns it as a []byte suitable for passing to a sub-object's
+// UnmarshalJSON.
+func (ffl *FFLexer) CaptureField(start FFTok) ([]byte, error) {
+ return ffl.scanField(start, true)
+}
+
+func (ffl *FFLexer) SkipField(start FFTok) error {
+ _, err := ffl.scanField(start, false)
+ return err
+}
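+
+// Illustrative usage of CaptureField (an editor's sketch, not part of the
+// upstream source):
+//
+//	lexer := NewFFLexer([]byte(`{"a": [1, 2, {"b": 3}]}`))
+//	lexer.Scan()        // {
+//	lexer.Scan()        // "a"
+//	lexer.Scan()        // :
+//	tok := lexer.Scan() // [ (FFTok_left_brace in this lexer's naming)
+//	raw, err := lexer.CaptureField(tok)
+//	// err == nil, string(raw) == `[1, 2, {"b": 3}]`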
+
+// TODO(pquerna): return line number and offset.
+func (err FFErr) ToError() error {
+ switch err {
+ case FFErr_e_ok:
+ return nil
+ case FFErr_io:
+ return errors.New("ffjson: IO error")
+ case FFErr_string_invalid_utf8:
+ return errors.New("ffjson: string with invalid UTF-8 sequence")
+ case FFErr_string_invalid_escaped_char:
+ return errors.New("ffjson: string with invalid escaped character")
+ case FFErr_string_invalid_json_char:
+ return errors.New("ffjson: string with invalid JSON character")
+ case FFErr_string_invalid_hex_char:
+ return errors.New("ffjson: string with invalid hex character")
+ case FFErr_invalid_char:
+ return errors.New("ffjson: invalid character")
+ case FFErr_invalid_string:
+ return errors.New("ffjson: invalid string")
+ case FFErr_missing_integer_after_decimal:
+ return errors.New("ffjson: missing integer after decimal")
+ case FFErr_missing_integer_after_exponent:
+ return errors.New("ffjson: missing integer after exponent")
+ case FFErr_missing_integer_after_minus:
+ return errors.New("ffjson: missing integer after minus")
+ case FFErr_unallowed_comment:
+ return errors.New("ffjson: unallowed comment")
+ case FFErr_incomplete_comment:
+ return errors.New("ffjson: incomplete comment")
+ case FFErr_unexpected_token_type:
+ return errors.New("ffjson: unexpected token sequence")
+ }
+
+ panic(fmt.Sprintf("unknown error type: %v ", err))
+}
+
+func (state FFParseState) String() string {
+ switch state {
+ case FFParse_map_start:
+ return "map:start"
+ case FFParse_want_key:
+ return "want_key"
+ case FFParse_want_colon:
+ return "want_colon"
+ case FFParse_want_value:
+ return "want_value"
+ case FFParse_after_value:
+ return "after_value"
+ }
+
+ panic(fmt.Sprintf("unknown parse state: %d", int(state)))
+}
+
+func (tok FFTok) String() string {
+ switch tok {
+ case FFTok_init:
+ return "tok:init"
+ case FFTok_bool:
+ return "tok:bool"
+ case FFTok_colon:
+ return "tok:colon"
+ case FFTok_comma:
+ return "tok:comma"
+ case FFTok_eof:
+ return "tok:eof"
+ case FFTok_error:
+ return "tok:error"
+ case FFTok_left_brace:
+ return "tok:left_brace"
+ case FFTok_left_bracket:
+ return "tok:left_bracket"
+ case FFTok_null:
+ return "tok:null"
+ case FFTok_right_brace:
+ return "tok:right_brace"
+ case FFTok_right_bracket:
+ return "tok:right_bracket"
+ case FFTok_integer:
+ return "tok:integer"
+ case FFTok_double:
+ return "tok:double"
+ case FFTok_string:
+ return "tok:string"
+ case FFTok_comment:
+ return "comment"
+ }
+
+ panic(fmt.Sprintf("unknown token: %d", int(tok)))
+}
+
+/* a lookup table which lets us quickly determine five things:
+ * cVEC - valid escaped control char
+ * note: the solidus '/' may be escaped or not.
+ * cIJC - invalid json char
+ * cVHC - valid hex char
+ * cNFP - needs further processing (from a string scanning perspective)
+ * cNUC - needs utf8 checking when enabled (from a string scanning perspective)
+ */
+
+const (
+ cVEC int8 = 0x01
+ cIJC int8 = 0x02
+ cVHC int8 = 0x04
+ cNFP int8 = 0x08
+ cNUC int8 = 0x10
+)
+
+var byteLookupTable [256]int8 = [256]int8{
+ cIJC, /* 0 */
+ cIJC, /* 1 */
+ cIJC, /* 2 */
+ cIJC, /* 3 */
+ cIJC, /* 4 */
+ cIJC, /* 5 */
+ cIJC, /* 6 */
+ cIJC, /* 7 */
+ cIJC, /* 8 */
+ cIJC, /* 9 */
+ cIJC, /* 10 */
+ cIJC, /* 11 */
+ cIJC, /* 12 */
+ cIJC, /* 13 */
+ cIJC, /* 14 */
+ cIJC, /* 15 */
+ cIJC, /* 16 */
+ cIJC, /* 17 */
+ cIJC, /* 18 */
+ cIJC, /* 19 */
+ cIJC, /* 20 */
+ cIJC, /* 21 */
+ cIJC, /* 22 */
+ cIJC, /* 23 */
+ cIJC, /* 24 */
+ cIJC, /* 25 */
+ cIJC, /* 26 */
+ cIJC, /* 27 */
+ cIJC, /* 28 */
+ cIJC, /* 29 */
+ cIJC, /* 30 */
+ cIJC, /* 31 */
+ 0, /* 32 */
+ 0, /* 33 */
+ cVEC | cIJC | cNFP, /* 34 */
+ 0, /* 35 */
+ 0, /* 36 */
+ 0, /* 37 */
+ 0, /* 38 */
+ 0, /* 39 */
+ 0, /* 40 */
+ 0, /* 41 */
+ 0, /* 42 */
+ 0, /* 43 */
+ 0, /* 44 */
+ 0, /* 45 */
+ 0, /* 46 */
+ cVEC, /* 47 */
+ cVHC, /* 48 */
+ cVHC, /* 49 */
+ cVHC, /* 50 */
+ cVHC, /* 51 */
+ cVHC, /* 52 */
+ cVHC, /* 53 */
+ cVHC, /* 54 */
+ cVHC, /* 55 */
+ cVHC, /* 56 */
+ cVHC, /* 57 */
+ 0, /* 58 */
+ 0, /* 59 */
+ 0, /* 60 */
+ 0, /* 61 */
+ 0, /* 62 */
+ 0, /* 63 */
+ 0, /* 64 */
+ cVHC, /* 65 */
+ cVHC, /* 66 */
+ cVHC, /* 67 */
+ cVHC, /* 68 */
+ cVHC, /* 69 */
+ cVHC, /* 70 */
+ 0, /* 71 */
+ 0, /* 72 */
+ 0, /* 73 */
+ 0, /* 74 */
+ 0, /* 75 */
+ 0, /* 76 */
+ 0, /* 77 */
+ 0, /* 78 */
+ 0, /* 79 */
+ 0, /* 80 */
+ 0, /* 81 */
+ 0, /* 82 */
+ 0, /* 83 */
+ 0, /* 84 */
+ 0, /* 85 */
+ 0, /* 86 */
+ 0, /* 87 */
+ 0, /* 88 */
+ 0, /* 89 */
+ 0, /* 90 */
+ 0, /* 91 */
+ cVEC | cIJC | cNFP, /* 92 */
+ 0, /* 93 */
+ 0, /* 94 */
+ 0, /* 95 */
+ 0, /* 96 */
+ cVHC, /* 97 */
+ cVEC | cVHC, /* 98 */
+ cVHC, /* 99 */
+ cVHC, /* 100 */
+ cVHC, /* 101 */
+ cVEC | cVHC, /* 102 */
+ 0, /* 103 */
+ 0, /* 104 */
+ 0, /* 105 */
+ 0, /* 106 */
+ 0, /* 107 */
+ 0, /* 108 */
+ 0, /* 109 */
+ cVEC, /* 110 */
+ 0, /* 111 */
+ 0, /* 112 */
+ 0, /* 113 */
+ cVEC, /* 114 */
+ 0, /* 115 */
+ cVEC, /* 116 */
+ 0, /* 117 */
+ 0, /* 118 */
+ 0, /* 119 */
+ 0, /* 120 */
+ 0, /* 121 */
+ 0, /* 122 */
+ 0, /* 123 */
+ 0, /* 124 */
+ 0, /* 125 */
+ 0, /* 126 */
+ 0, /* 127 */
+ cNUC, /* 128 */
+ cNUC, /* 129 */
+ cNUC, /* 130 */
+ cNUC, /* 131 */
+ cNUC, /* 132 */
+ cNUC, /* 133 */
+ cNUC, /* 134 */
+ cNUC, /* 135 */
+ cNUC, /* 136 */
+ cNUC, /* 137 */
+ cNUC, /* 138 */
+ cNUC, /* 139 */
+ cNUC, /* 140 */
+ cNUC, /* 141 */
+ cNUC, /* 142 */
+ cNUC, /* 143 */
+ cNUC, /* 144 */
+ cNUC, /* 145 */
+ cNUC, /* 146 */
+ cNUC, /* 147 */
+ cNUC, /* 148 */
+ cNUC, /* 149 */
+ cNUC, /* 150 */
+ cNUC, /* 151 */
+ cNUC, /* 152 */
+ cNUC, /* 153 */
+ cNUC, /* 154 */
+ cNUC, /* 155 */
+ cNUC, /* 156 */
+ cNUC, /* 157 */
+ cNUC, /* 158 */
+ cNUC, /* 159 */
+ cNUC, /* 160 */
+ cNUC, /* 161 */
+ cNUC, /* 162 */
+ cNUC, /* 163 */
+ cNUC, /* 164 */
+ cNUC, /* 165 */
+ cNUC, /* 166 */
+ cNUC, /* 167 */
+ cNUC, /* 168 */
+ cNUC, /* 169 */
+ cNUC, /* 170 */
+ cNUC, /* 171 */
+ cNUC, /* 172 */
+ cNUC, /* 173 */
+ cNUC, /* 174 */
+ cNUC, /* 175 */
+ cNUC, /* 176 */
+ cNUC, /* 177 */
+ cNUC, /* 178 */
+ cNUC, /* 179 */
+ cNUC, /* 180 */
+ cNUC, /* 181 */
+ cNUC, /* 182 */
+ cNUC, /* 183 */
+ cNUC, /* 184 */
+ cNUC, /* 185 */
+ cNUC, /* 186 */
+ cNUC, /* 187 */
+ cNUC, /* 188 */
+ cNUC, /* 189 */
+ cNUC, /* 190 */
+ cNUC, /* 191 */
+ cNUC, /* 192 */
+ cNUC, /* 193 */
+ cNUC, /* 194 */
+ cNUC, /* 195 */
+ cNUC, /* 196 */
+ cNUC, /* 197 */
+ cNUC, /* 198 */
+ cNUC, /* 199 */
+ cNUC, /* 200 */
+ cNUC, /* 201 */
+ cNUC, /* 202 */
+ cNUC, /* 203 */
+ cNUC, /* 204 */
+ cNUC, /* 205 */
+ cNUC, /* 206 */
+ cNUC, /* 207 */
+ cNUC, /* 208 */
+ cNUC, /* 209 */
+ cNUC, /* 210 */
+ cNUC, /* 211 */
+ cNUC, /* 212 */
+ cNUC, /* 213 */
+ cNUC, /* 214 */
+ cNUC, /* 215 */
+ cNUC, /* 216 */
+ cNUC, /* 217 */
+ cNUC, /* 218 */
+ cNUC, /* 219 */
+ cNUC, /* 220 */
+ cNUC, /* 221 */
+ cNUC, /* 222 */
+ cNUC, /* 223 */
+ cNUC, /* 224 */
+ cNUC, /* 225 */
+ cNUC, /* 226 */
+ cNUC, /* 227 */
+ cNUC, /* 228 */
+ cNUC, /* 229 */
+ cNUC, /* 230 */
+ cNUC, /* 231 */
+ cNUC, /* 232 */
+ cNUC, /* 233 */
+ cNUC, /* 234 */
+ cNUC, /* 235 */
+ cNUC, /* 236 */
+ cNUC, /* 237 */
+ cNUC, /* 238 */
+ cNUC, /* 239 */
+ cNUC, /* 240 */
+ cNUC, /* 241 */
+ cNUC, /* 242 */
+ cNUC, /* 243 */
+ cNUC, /* 244 */
+ cNUC, /* 245 */
+ cNUC, /* 246 */
+ cNUC, /* 247 */
+ cNUC, /* 248 */
+ cNUC, /* 249 */
+ cNUC, /* 250 */
+ cNUC, /* 251 */
+ cNUC, /* 252 */
+ cNUC, /* 253 */
+ cNUC, /* 254 */
+ cNUC, /* 255 */
+}
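+
+// Example lookups:
+//
+//	byteLookupTable['"']&cIJC != 0 // a raw quote ends a string scan
+//	byteLookupTable['n']&cVEC != 0 // \n is a valid escaped control char
+//	byteLookupTable['F']&cVHC != 0 // 'F' is a valid hex char in \uXXXX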
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go
new file mode 100644
index 000000000..0f22c469d
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go
@@ -0,0 +1,512 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package v1
+
+import (
+ "fmt"
+ "io"
+ "unicode"
+ "unicode/utf16"
+)
+
+const sliceStringMask = cIJC | cNFP
+
+type ffReader struct {
+ s []byte
+ i int
+ l int
+}
+
+func newffReader(d []byte) *ffReader {
+ return &ffReader{
+ s: d,
+ i: 0,
+ l: len(d),
+ }
+}
+
+func (r *ffReader) Slice(start, stop int) []byte {
+ return r.s[start:stop]
+}
+
+func (r *ffReader) Pos() int {
+ return r.i
+}
+
+// Reset resets the reader and sets new input.
+func (r *ffReader) Reset(d []byte) {
+ r.s = d
+ r.i = 0
+ r.l = len(d)
+}
+
+// PosWithLine calculates the position as a line number and an offset within
+// that line. Because this isn't tracked during scanning for performance
+// reasons, it iterates the buffer from the beginning and should only be
+// used on error paths.
+func (r *ffReader) PosWithLine() (int, int) {
+ currentLine := 1
+ currentChar := 0
+
+ for i := 0; i < r.i; i++ {
+ c := r.s[i]
+ currentChar++
+ if c == '\n' {
+ currentLine++
+ currentChar = 0
+ }
+ }
+
+ return currentLine, currentChar
+}
+
+func (r *ffReader) ReadByteNoWS() (byte, error) {
+ if r.i >= r.l {
+ return 0, io.EOF
+ }
+
+ j := r.i
+
+ for {
+ c := r.s[j]
+ j++
+
+ // inline whitespace parsing gives another ~8% performance boost
+ // for many kinds of nicely indented JSON.
+ // ... and using a [256]bool instead of multiple ifs, gives another 2%
+ /*
+ if c != '\t' &&
+ c != '\n' &&
+ c != '\v' &&
+ c != '\f' &&
+ c != '\r' &&
+ c != ' ' {
+ r.i = j
+ return c, nil
+ }
+ */
+ if !whitespaceLookupTable[c] {
+ r.i = j
+ return c, nil
+ }
+
+ if j >= r.l {
+ return 0, io.EOF
+ }
+ }
+}
+
+func (r *ffReader) ReadByte() (byte, error) {
+ if r.i >= r.l {
+ return 0, io.EOF
+ }
+
+ r.i++
+
+ return r.s[r.i-1], nil
+}
+
+func (r *ffReader) UnreadByte() error {
+ if r.i <= 0 {
+ panic("ffReader.UnreadByte: at beginning of slice")
+ }
+ r.i--
+ return nil
+}
+
+func (r *ffReader) readU4(j int) (rune, error) {
+
+ var u4 [4]byte
+ for i := 0; i < 4; i++ {
+ if j >= r.l {
+ return -1, io.EOF
+ }
+ c := r.s[j]
+ if byteLookupTable[c]&cVHC != 0 {
+ u4[i] = c
+ j++
+ continue
+ } else {
+ // TODO(pquerna): handle errors better. layering violation.
+ return -1, fmt.Errorf("lex_string_invalid_hex_char: %v %v", c, string(u4[:]))
+ }
+ }
+
+ // TODO(pquerna): utf16.IsSurrogate
+ rr, err := ParseUint(u4[:], 16, 64)
+ if err != nil {
+ return -1, err
+ }
+ return rune(rr), nil
+}
+
+func (r *ffReader) handleEscaped(c byte, j int, out DecodingBuffer) (int, error) {
+ if j >= r.l {
+ return 0, io.EOF
+ }
+
+ c = r.s[j]
+ j++
+
+ if c == 'u' {
+ ru, err := r.readU4(j)
+ if err != nil {
+ return 0, err
+ }
+
+ if utf16.IsSurrogate(ru) {
+ ru2, err := r.readU4(j + 6)
+ if err != nil {
+ return 0, err
+ }
+ out.Write(r.s[r.i : j-2])
+ r.i = j + 10
+ j = r.i
+ rval := utf16.DecodeRune(ru, ru2)
+ if rval != unicode.ReplacementChar {
+ out.WriteRune(rval)
+ } else {
+ return 0, fmt.Errorf("lex_string_invalid_unicode_surrogate: %v %v", ru, ru2)
+ }
+ } else {
+ out.Write(r.s[r.i : j-2])
+ r.i = j + 4
+ j = r.i
+ out.WriteRune(ru)
+ }
+ return j, nil
+ } else if byteLookupTable[c]&cVEC == 0 {
+ return 0, fmt.Errorf("lex_string_invalid_escaped_char: %v", c)
+ } else {
+ out.Write(r.s[r.i : j-2])
+ r.i = j
+ j = r.i
+
+ switch c {
+ case '"':
+ out.WriteByte('"')
+ case '\\':
+ out.WriteByte('\\')
+ case '/':
+ out.WriteByte('/')
+ case 'b':
+ out.WriteByte('\b')
+ case 'f':
+ out.WriteByte('\f')
+ case 'n':
+ out.WriteByte('\n')
+ case 'r':
+ out.WriteByte('\r')
+ case 't':
+ out.WriteByte('\t')
+ }
+ }
+
+ return j, nil
+}
+
+func (r *ffReader) SliceString(out DecodingBuffer) error {
+ var c byte
+ // TODO(pquerna): string_with_escapes? de-escape here?
+ j := r.i
+
+ for {
+ if j >= r.l {
+ return io.EOF
+ }
+
+ j, c = scanString(r.s, j)
+
+ if c == '"' {
+ if j != r.i {
+ out.Write(r.s[r.i : j-1])
+ r.i = j
+ }
+ return nil
+ } else if c == '\\' {
+ var err error
+ j, err = r.handleEscaped(c, j, out)
+ if err != nil {
+ return err
+ }
+ } else if byteLookupTable[c]&cIJC != 0 {
+ return fmt.Errorf("lex_string_invalid_json_char: %v", c)
+ }
+ continue
+ }
+}
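+
+// Illustrative usage (an editor's sketch, not part of the upstream source):
+// the reader must be positioned just past the opening quote; SliceString
+// consumes through the closing quote and writes the decoded bytes to out.
+//
+//	r := newffReader([]byte(`a\tb" trailing`))
+//	var out Buffer
+//	err := r.SliceString(&out)
+//	// err == nil, string(out.Bytes()) == "a\tb" (with a real tab)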
+
+// TODO(pquerna): consider combining with the normal byte mask.
+var whitespaceLookupTable [256]bool = [256]bool{
+ false, /* 0 */
+ false, /* 1 */
+ false, /* 2 */
+ false, /* 3 */
+ false, /* 4 */
+ false, /* 5 */
+ false, /* 6 */
+ false, /* 7 */
+ false, /* 8 */
+ true, /* 9 */
+ true, /* 10 */
+ true, /* 11 */
+ true, /* 12 */
+ true, /* 13 */
+ false, /* 14 */
+ false, /* 15 */
+ false, /* 16 */
+ false, /* 17 */
+ false, /* 18 */
+ false, /* 19 */
+ false, /* 20 */
+ false, /* 21 */
+ false, /* 22 */
+ false, /* 23 */
+ false, /* 24 */
+ false, /* 25 */
+ false, /* 26 */
+ false, /* 27 */
+ false, /* 28 */
+ false, /* 29 */
+ false, /* 30 */
+ false, /* 31 */
+ true, /* 32 */
+ false, /* 33 */
+ false, /* 34 */
+ false, /* 35 */
+ false, /* 36 */
+ false, /* 37 */
+ false, /* 38 */
+ false, /* 39 */
+ false, /* 40 */
+ false, /* 41 */
+ false, /* 42 */
+ false, /* 43 */
+ false, /* 44 */
+ false, /* 45 */
+ false, /* 46 */
+ false, /* 47 */
+ false, /* 48 */
+ false, /* 49 */
+ false, /* 50 */
+ false, /* 51 */
+ false, /* 52 */
+ false, /* 53 */
+ false, /* 54 */
+ false, /* 55 */
+ false, /* 56 */
+ false, /* 57 */
+ false, /* 58 */
+ false, /* 59 */
+ false, /* 60 */
+ false, /* 61 */
+ false, /* 62 */
+ false, /* 63 */
+ false, /* 64 */
+ false, /* 65 */
+ false, /* 66 */
+ false, /* 67 */
+ false, /* 68 */
+ false, /* 69 */
+ false, /* 70 */
+ false, /* 71 */
+ false, /* 72 */
+ false, /* 73 */
+ false, /* 74 */
+ false, /* 75 */
+ false, /* 76 */
+ false, /* 77 */
+ false, /* 78 */
+ false, /* 79 */
+ false, /* 80 */
+ false, /* 81 */
+ false, /* 82 */
+ false, /* 83 */
+ false, /* 84 */
+ false, /* 85 */
+ false, /* 86 */
+ false, /* 87 */
+ false, /* 88 */
+ false, /* 89 */
+ false, /* 90 */
+ false, /* 91 */
+ false, /* 92 */
+ false, /* 93 */
+ false, /* 94 */
+ false, /* 95 */
+ false, /* 96 */
+ false, /* 97 */
+ false, /* 98 */
+ false, /* 99 */
+ false, /* 100 */
+ false, /* 101 */
+ false, /* 102 */
+ false, /* 103 */
+ false, /* 104 */
+ false, /* 105 */
+ false, /* 106 */
+ false, /* 107 */
+ false, /* 108 */
+ false, /* 109 */
+ false, /* 110 */
+ false, /* 111 */
+ false, /* 112 */
+ false, /* 113 */
+ false, /* 114 */
+ false, /* 115 */
+ false, /* 116 */
+ false, /* 117 */
+ false, /* 118 */
+ false, /* 119 */
+ false, /* 120 */
+ false, /* 121 */
+ false, /* 122 */
+ false, /* 123 */
+ false, /* 124 */
+ false, /* 125 */
+ false, /* 126 */
+ false, /* 127 */
+ false, /* 128 */
+ false, /* 129 */
+ false, /* 130 */
+ false, /* 131 */
+ false, /* 132 */
+ false, /* 133 */
+ false, /* 134 */
+ false, /* 135 */
+ false, /* 136 */
+ false, /* 137 */
+ false, /* 138 */
+ false, /* 139 */
+ false, /* 140 */
+ false, /* 141 */
+ false, /* 142 */
+ false, /* 143 */
+ false, /* 144 */
+ false, /* 145 */
+ false, /* 146 */
+ false, /* 147 */
+ false, /* 148 */
+ false, /* 149 */
+ false, /* 150 */
+ false, /* 151 */
+ false, /* 152 */
+ false, /* 153 */
+ false, /* 154 */
+ false, /* 155 */
+ false, /* 156 */
+ false, /* 157 */
+ false, /* 158 */
+ false, /* 159 */
+ false, /* 160 */
+ false, /* 161 */
+ false, /* 162 */
+ false, /* 163 */
+ false, /* 164 */
+ false, /* 165 */
+ false, /* 166 */
+ false, /* 167 */
+ false, /* 168 */
+ false, /* 169 */
+ false, /* 170 */
+ false, /* 171 */
+ false, /* 172 */
+ false, /* 173 */
+ false, /* 174 */
+ false, /* 175 */
+ false, /* 176 */
+ false, /* 177 */
+ false, /* 178 */
+ false, /* 179 */
+ false, /* 180 */
+ false, /* 181 */
+ false, /* 182 */
+ false, /* 183 */
+ false, /* 184 */
+ false, /* 185 */
+ false, /* 186 */
+ false, /* 187 */
+ false, /* 188 */
+ false, /* 189 */
+ false, /* 190 */
+ false, /* 191 */
+ false, /* 192 */
+ false, /* 193 */
+ false, /* 194 */
+ false, /* 195 */
+ false, /* 196 */
+ false, /* 197 */
+ false, /* 198 */
+ false, /* 199 */
+ false, /* 200 */
+ false, /* 201 */
+ false, /* 202 */
+ false, /* 203 */
+ false, /* 204 */
+ false, /* 205 */
+ false, /* 206 */
+ false, /* 207 */
+ false, /* 208 */
+ false, /* 209 */
+ false, /* 210 */
+ false, /* 211 */
+ false, /* 212 */
+ false, /* 213 */
+ false, /* 214 */
+ false, /* 215 */
+ false, /* 216 */
+ false, /* 217 */
+ false, /* 218 */
+ false, /* 219 */
+ false, /* 220 */
+ false, /* 221 */
+ false, /* 222 */
+ false, /* 223 */
+ false, /* 224 */
+ false, /* 225 */
+ false, /* 226 */
+ false, /* 227 */
+ false, /* 228 */
+ false, /* 229 */
+ false, /* 230 */
+ false, /* 231 */
+ false, /* 232 */
+ false, /* 233 */
+ false, /* 234 */
+ false, /* 235 */
+ false, /* 236 */
+ false, /* 237 */
+ false, /* 238 */
+ false, /* 239 */
+ false, /* 240 */
+ false, /* 241 */
+ false, /* 242 */
+ false, /* 243 */
+ false, /* 244 */
+ false, /* 245 */
+ false, /* 246 */
+ false, /* 247 */
+ false, /* 248 */
+ false, /* 249 */
+ false, /* 250 */
+ false, /* 251 */
+ false, /* 252 */
+ false, /* 253 */
+ false, /* 254 */
+ false, /* 255 */
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go
new file mode 100644
index 000000000..47c260770
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package v1
+
+func scanString(s []byte, j int) (int, byte) {
+ for {
+ if j >= len(s) {
+ return j, 0
+ }
+
+ c := s[j]
+ j++
+ if byteLookupTable[c]&sliceStringMask == 0 {
+ continue
+ }
+
+ return j, c
+ }
+}
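+
+// Illustrative behavior (an editor's sketch, not part of the upstream
+// source): scanString skips plain string bytes and returns one past the
+// first byte that needs attention, along with that byte:
+//
+//	j, c := scanString([]byte(`abc"def`), 0)
+//	// j == 4, c == '"'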
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 000000000..dd878a30e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md
new file mode 100644
index 000000000..479290d27
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/README.md
@@ -0,0 +1,47 @@
+# Prometheus Go client library
+
+[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)
+
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://prometheus.io). It has two separate parts, one for
+instrumenting application code, and one for creating clients that talk to the
+Prometheus HTTP API.
+
+## Instrumenting applications
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)
+
+The
+[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
+contains the instrumentation library. See the
+[best practices section](http://prometheus.io/docs/practices/naming/) of the
+Prometheus documentation to learn more about instrumenting applications.
+
+The
+[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
+contains simple examples of instrumented code.
+
+## Client for the Prometheus HTTP API
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)
+
+The
+[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
+contains the client for the
+[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
+to write Go applications that query time series data from a Prometheus
+server. It is still in the alpha stage.
+
+## Where are `model`, `extraction`, and `text`?
+
+The `model` package has been moved to
+[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).
+
+The `extraction` and `text` packages are now contained in
+[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).
+
+## Contributing and community
+
+See the [contributing guidelines](CONTRIBUTING.md) and the
+[Community section](http://prometheus.io/community/) of the homepage.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 000000000..44986bff0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 000000000..623d3d83f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by
+ // Describe. Returned metrics that share the same descriptor must differ
+ // in their variable label values. This method may be called
+ // concurrently and must therefore be implemented in a concurrency safe
+ // way. Blocking occurs at the expense of total performance of rendering
+ // all registered metrics. Ideally, Collector implementations support
+ // concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
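+
+// Editorial sketch, not part of the upstream file: a minimal custom
+// Collector exporting one gauge computed at collect time. queueLength is an
+// assumed helper; Desc creation via NewDesc is shown in desc.go.
+//
+//	type queueCollector struct{ desc *Desc }
+//
+//	func (c queueCollector) Describe(ch chan<- *Desc) { ch <- c.desc }
+//
+//	func (c queueCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstMetric(c.desc, GaugeValue, queueLength())
+//	}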
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 000000000..72d5256a5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,164 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ *MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
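+
+// Editorial usage sketch, not part of the upstream file (assumes an
+// importing package and the default registry):
+//
+//	requests := prometheus.NewCounterVec(
+//		prometheus.CounterOpts{
+//			Name: "http_requests_total",
+//			Help: "Total HTTP requests, partitioned by code and method.",
+//		},
+//		[]string{"code", "method"},
+//	)
+//	prometheus.MustRegister(requests)
+//	requests.WithLabelValues("200", "GET").Inc()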
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 000000000..1835b16f6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,200 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts, or
+// when creating a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each of constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+	// variableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+	// id is a hash of the values of the constLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+	// err is an error that occurred during construction. It is reported at
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
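+
+// Editorial usage sketch, not part of the upstream file: a Desc with one
+// variable label and one constant label (all names are illustrative only):
+//
+//	desc := prometheus.NewDesc(
+//		prometheus.BuildFQName("myapp", "worker", "jobs_total"),
+//		"Jobs handled, partitioned by state, with a constant shard label.",
+//		[]string{"state"},               // variable label names
+//		prometheus.Labels{"shard": "a"}, // constant labels
+//	)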
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 000000000..278969dc7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,186 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow you to
+// expose the registered metrics via HTTP (package promhttp) or push them to a
+// Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "log"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
+// )
+//
+// func init() {
+// // Metrics have to be registered to be exposed:
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Your own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is by far the most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, which
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegistry. You can use multiple registries at the
+// same time to expose different metrics in different ways. You can use separate
+// registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
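+//
+// A hedged editorial sketch (not upstream text) of a custom registry, assuming
+// promhttp.HandlerFor from the promhttp sub-package described below:
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(prometheus.NewGoCollector())
+//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))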
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 000000000..18a99d5fa
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+type expvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as a Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
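+
+// Editorial usage sketch, not part of the upstream file: proxying a single
+// numeric expvar (published elsewhere via expvar.NewInt("hits")) as an
+// Untyped Prometheus metric:
+//
+//	c := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
+//		"hits": prometheus.NewDesc(
+//			"expvar_hits_total",
+//			"Hit count proxied from the expvar key \"hits\".",
+//			nil, nil,
+//		),
+//	})
+//	prometheus.MustRegister(c)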
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 000000000..e3b67df8a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
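+
+// Editorial sketch, not part of the upstream file: this is the idiom with
+// which desc.go folds an ordered list of strings into one identifying hash
+// (separatorByte is defined elsewhere in this package):
+//
+//	h := hashNew()
+//	for _, v := range []string{"fq_name", "const_label_value"} {
+//		h = hashAdd(h, v)
+//		h = hashAddByte(h, separatorByte)
+//	}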
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 000000000..9ab5a3d62
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
+ Inc()
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
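+
+// Editorial usage sketch, not part of the upstream file (assumes the
+// importing package also imports runtime):
+//
+//	goroutines := prometheus.NewGaugeFunc(
+//		prometheus.GaugeOpts{
+//			Name: "app_goroutines",
+//			Help: "Current number of goroutines.",
+//		},
+//		func() float64 { return float64(runtime.NumGoroutine()) },
+//	)
+//	prometheus.MustRegister(goroutines)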
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 000000000..f96764559
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,276 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutinesDesc *Desc
+ threadsDesc *Desc
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// Go process.
+func NewGoCollector() Collector {
+ return &goCollector{
+ goroutinesDesc: NewDesc(
+ "go_goroutines",
+ "Number of goroutines that currently exist.",
+ nil, nil),
+ threadsDesc: NewDesc(
+ "go_threads",
+ "Number of OS threads created",
+ nil, nil),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
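+
+// For example, memstatNamespace("alloc_bytes") yields "go_memstats_alloc_bytes".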
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutinesDesc
+ ch <- c.threadsDesc
+ ch <- c.gcDesc
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+ n, _ := runtime.ThreadCreateProfile(nil)
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
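+	// With a length-5 slice, ReadGCStats fills PauseQuantiles with the
+	// minimum, the 25th/50th/75th percentiles, and the maximum GC pause.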
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 000000000..f46eff6ac
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,444 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
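+
+// For illustration, LinearBuckets(1, 2.5, 5) returns
+// []float64{1, 3.5, 6, 8.5, 11}; the +Inf bucket is added implicitly by the
+// histogram itself.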
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
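+
+// For illustration, ExponentialBuckets(0.1, 2, 4) returns
+// []float64{0.1, 0.2, 0.4, 0.8}, doubling each upper bound.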
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+	// name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
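+
+// A minimal usage sketch (the metric name and observed value are
+// illustrative):
+//
+//	reqDur := NewHistogram(HistogramOpts{
+//		Name:    "request_duration_seconds",
+//		Help:    "Request latency distribution.",
+//		Buckets: DefBuckets,
+//	})
+//	MustRegister(reqDur)
+//	reqDur.Observe(0.42)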
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+	// Now we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ selfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
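+	// Atomically add v to the float64 sum: read the current bits, add v, and
+	// retry with compare-and-swap until no concurrent update intervenes.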
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Observer and not a
+// Metric so that no type conversion to an Observer is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Observer and not a Metric so that no
+// type conversion to an Observer is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ return m.MetricVec.WithLabelValues(lvs...).(Observer)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Observer {
+ return m.MetricVec.With(labels).(Observer)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
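+
+// A minimal sketch of building such a throw-away metric inside a custom
+// Collect method (desc is assumed to be a previously constructed *Desc
+// without variable labels):
+//
+//	m, err := NewConstHistogram(
+//		desc,
+//		42,   // observation count
+//		12.3, // sum of observations
+//		map[float64]uint64{0.5: 30, 1: 40}, // cumulative counts per bucket
+//	)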
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 000000000..d485ce0b8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,524 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is not instrumented, but can be instrumented with the tooling provided
+// in package promhttp).
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := DefaultGatherer.Gather()
+ if err != nil {
+ http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ })
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following:
+//
+// - It uses Summaries rather than Histograms. Summaries are not useful if
+// aggregation across multiple instances is required.
+//
+// - It uses microseconds as unit, which is deprecated and should be replaced by
+// seconds.
+//
+// - The size of the request is calculated in a separate goroutine. Since this
+// calculator requires access to the request header, it creates a race with
+// any writes to the header performed during request handling.
+// httputil.ReverseProxy is a prominent example of a handler
+// performing such writes.
+//
+// - It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
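+
+// A usage sketch of the deprecated form (myHandler is assumed to be an
+// existing http.Handler; prefer the promhttp tooling as explained above):
+//
+//	http.Handle("/metrics", InstrumentHandler("metrics", myHandler))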
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details on how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+ if err := Register(reqCnt); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+ if err := Register(reqDur); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+ if err := Register(reqSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+ if err := Register(resSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ resSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := computeApproximateRequestSize(r)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ reqCnt.WithLabelValues(method, code).Inc()
+ reqDur.Observe(elapsed)
+ resSz.Observe(float64(delegate.written))
+ reqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+	// Get the URL length in the current goroutine to avoid a race condition:
+	// a HandlerFunc running in parallel may modify the URL.
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ out := make(chan int, 1)
+
+ go func() {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+ close(out)
+ }()
+
+ return out
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 000000000..d4063d98f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. (Implementers may find
+ // LabelPairSorter useful for that.) Callers of Write should still make
+ // sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises.)
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
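+
+// For illustration:
+//
+//	BuildFQName("http", "server", "requests_total") // "http_server_requests_total"
+//	BuildFQName("", "server", "requests_total")     // "server_requests_total"
+//	BuildFQName("http", "server", "")               // ""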
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 000000000..b0520e85e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
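+
+// For example, assuming g is a Gauge (declared elsewhere in this package),
+// ObserverFunc(g.Set) is an Observer whose Observe method calls g.Set, as in
+// the "Gauge" Timer use case mentioned above:
+//
+//	var obs Observer = ObserverFunc(g.Set)
+//	obs.Observe(1.5) // equivalent to g.Set(1.5)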
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+
+ Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 000000000..94b2553e1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,140 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, rss *Desc
+ startTime *Desc
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory, and file descriptor usage as well as
+// the process start time for the given process ID under the given namespace.
+func NewProcessCollector(pid int, namespace string) Collector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
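+
+// A minimal usage sketch (the namespace is illustrative):
+//
+//	MustRegister(NewProcessCollector(os.Getpid(), "myapp"))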
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including CPU, memory, and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) Collector {
+ ns := ""
+ if len(namespace) > 0 {
+ ns = namespace + "_"
+ }
+
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.rss
+ ch <- c.startTime
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 000000000..8c6b5bd8e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,755 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach to keep default instances
+// as global state mirrors the approach of other packages in the Go standard
+// library. Note that there are caveats. Change the variables with caution and
+// only if you understand the consequences. Users who want to avoid global state
+// altogether should not use the convenience functions and act on custom
+// instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as the type for registration purposes (rather than the Registry
+// type directly). That way, they are free to use custom Registerer
+// implementations (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // It is in general not safe to register the same Collector multiple
+ // times concurrently.
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered.
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of MetricFamily protobufs. Even if an error occurs, Gather attempts
+ // to gather as many metrics as possible. Hence, if a non-nil error is
+ // returned, the returned MetricFamily slice could be nil (in case of a
+ // fatal error that prevented any meaningful metric collection) or
+ // contain a number of MetricFamily protobufs, some of which might be
+ // incomplete, and some might be missing altogether. The returned error
+ // (which might be a MultiError) explains the details. In scenarios
+ // where complete collection is critical, the returned MetricFamily
+ // protobufs should be disregarded if the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
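+
+// A sketch of that switch-over pattern (reqCnt is assumed to be a
+// *CounterVec that may already be registered):
+//
+//	if err := Register(reqCnt); err != nil {
+//		if are, ok := err.(AlreadyRegisteredError); ok {
+//			reqCnt = are.ExistingCollector.(*CounterVec)
+//		} else {
+//			panic(err)
+//		}
+//	}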
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
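+
+// For example, a Gatherer can accumulate errors in a MultiError and finish
+// with "return mfs, errs.MaybeUnwrap()", yielding nil when nothing failed.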
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return AlreadyRegisteredError{
+ ExistingCollector: existing,
+ NewCollector: c,
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+ // This could be done concurrently, too, but it would require locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "error collecting metric %v: %s", desc, err,
+ ))
+ continue
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok {
+ if metricFamily.GetHelp() != desc.help {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ ))
+ continue
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else {
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ errs = append(errs, fmt.Errorf(
+ "empty metric collected: %s", dtoMetric,
+ ))
+ continue
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ if r.pedanticChecksEnabled {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ ))
+ continue
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
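+
+// For instance, to merge the metrics of a custom Registry with those of the
+// DefaultGatherer (sketch; DefaultGatherer is defined in this package,
+// myRegistry is illustrative):
+//
+//	g := Gatherers{DefaultGatherer, myRegistry}
+//	mfs, err := g.Gather()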
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// does not yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ dh := hashNew()
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetValue())
+ h = hashAddByte(h, separatorByte)
+ dh = hashAdd(dh, lp.GetName())
+ dh = hashAddByte(dh, separatorByte)
+ }
+ if _, exists := metricHashes[h]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
+ if dimHash != dh {
+ return fmt.Errorf(
+ "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ } else {
+ dimHashes[metricFamily.GetName()] = dh
+ }
+ metricHashes[h] = struct{}{}
+ return nil
+}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 000000000..1c65e25ec
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,543 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
+
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
+var (
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is DefObjectives. It is used if Objectives is left at
+ // its zero value (i.e. nil). To create a Summary without Objectives,
+ // set it to an empty map (i.e. map[float64]float64{}).
+ //
+ // Deprecated: Note that the current value of DefObjectives is
+ // deprecated. It will be replaced by an empty map in v0.10 of the
+ // library. Please explicitly set Objectives to the desired value.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
+
+// Problem with the sliding-window decay algorithm: The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see the code up to commit
+// 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
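+
+// A basic usage sketch (metric name and values are illustrative; the
+// Objectives shown equal DefObjectives):
+//
+//	reqDur := NewSummary(SummaryOpts{
+//		Name:       "request_duration_seconds",
+//		Help:       "Request latency in seconds.",
+//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//	})
+//	reqDur.Observe(0.42)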
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if opts.Objectives == nil {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ selfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets a new expiration time set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ *MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
+ }
+}
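+
+// A usage sketch (metric and label names are illustrative):
+//
+//	latencies := NewSummaryVec(
+//		SummaryOpts{
+//			Name: "http_request_duration_seconds",
+//			Help: "HTTP request latencies in seconds.",
+//		},
+//		[]string{"method", "code"},
+//	)
+//	latencies.WithLabelValues("GET", "200").Observe(0.021)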
+
+// GetMetricWithLabelValues replaces the method of the same name in MetricVec.
+// The difference is that this method returns an Observer and not a Metric so
+// that no type conversion to an Observer is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Observer and not a Metric so that
+// no type conversion to an Observer is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ return m.MetricVec.WithLabelValues(lvs...).(Observer)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Observer {
+ return m.MetricVec.With(labels).(Observer)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
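+
+// A sketch of typical use within a custom Collector's Collect method (desc
+// and all numbers are illustrative):
+//
+//	ch <- MustNewConstSummary(
+//		desc,
+//		4711,   // count
+//		403.42, // sum
+//		map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//	)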
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 000000000..12b65699b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+func (t *Timer) ObserveDuration() {
+ if t.observer != nil {
+ t.observer.Observe(time.Since(t.begin).Seconds())
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 000000000..065501d38
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+//
+// Deprecated: The Untyped type is deprecated because it doesn't make sense in
+// direct instrumentation. If you need to mirror an external metric of unknown
+// type (usually while writing exporters), use MustNewConstMetric to create an
+// untyped metric instance on the fly.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ *MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
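+
+// For example, to mirror a value maintained elsewhere (sketch; queueLength is
+// an illustrative, concurrency-safe function returning a float64):
+//
+//	m := NewUntypedFunc(UntypedOpts{
+//		Name: "worker_queue_length",
+//		Help: "Current length of the worker queue.",
+//	}, queueLength)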
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 000000000..ff75ce585
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,239 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) SetToCurrentTime() {
+ v.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
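+ // Lock-free add: re-read the current bits and retry the compare-and-swap
+ // until no concurrent writer interferes.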
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 000000000..7f3eef9a4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,404 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects the children.
+ children map[uint64][]metricWithLabelValues
+ desc *Desc
+
+ newMetric func(labelValues ...string) Metric
+ hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized MetricVec. The concrete value is
+// returned for embedding into another struct.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+ return &MetricVec{
+ children: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// Describe implements Collector. It sends exactly one Desc to the provided
+// channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.children {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabels(h, labels), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ return m.deleteByHashWithLabelValues(h, lvs)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+
+ i := m.findMetricWithLabelValues(metrics, lvs)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+ i := m.findMetricWithLabels(metrics, labels)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, val := range vals {
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
+// values, or creates it and returns the new one.
+//
+// This function acquires the mutex itself; callers must not hold it.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
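+ // Re-check under the write lock: another goroutine may have created the
+ // metric between the RUnlock above and the Lock here.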
+ metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ if !ok {
+ // Copy to avoid allocation in case we don't go down this code path.
+ copiedLVs := make([]string, len(lvs))
+ copy(copiedLVs, lvs)
+ metric = m.newMetric(copiedLVs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels, or
+// creates it and returns the new one.
+//
+// This function acquires the mutex itself; callers must not hold it.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabels(hash, labels)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabels(hash, labels)
+ if !ok {
+ lvs := m.extractLabelValues(labels)
+ metric = m.newMetric(lvs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+ }
+ return metric
+}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+ for i, metric := range metrics {
+ if m.matchLabelValues(metric.values, lvs) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+ for i, metric := range metrics {
+ if m.matchLabels(metric.values, labels) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+ if len(values) != len(lvs) {
+ return false
+ }
+ for i, v := range values {
+ if v != lvs[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+ if len(labels) != len(values) {
+ return false
+ }
+ for i, k := range m.desc.variableLabels {
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) extractLabelValues(labels Labels) []string {
+ labelValues := make([]string, len(labels))
+ for i, k := range m.desc.variableLabels {
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
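As a usage sketch for the two deletion styles documented above (an editorial example, not part of the vendored file; it uses the CounterVec type from this same package, which the DeleteLabelValues comment already points to):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests served.",
		},
		[]string{"method", "code"}, // the variable labels, in order
	)
	requests.WithLabelValues("GET", "200").Inc()

	// Positional: the argument order must match the label order above.
	// DeleteLabelValues("200", "GET") would silently match nothing.
	_ = requests.DeleteLabelValues("GET", "200")

	// By name: order-independent, at the cost of building a Labels map.
	_ = requests.Delete(prometheus.Labels{"method": "GET", "code": "200"})
}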
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 000000000..20110e410
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md
new file mode 100644
index 000000000..a710042db
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/README.md
@@ -0,0 +1,26 @@
+# Background
+Under most circumstances, manually downloading this repository should not
+be required.
+
+# Prerequisites
+## Base
+* [Google Protocol Buffers](https://developers.google.com/protocol-buffers)
+
+## Java
+* [Apache Maven](http://maven.apache.org)
+* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository
+
+## Go
+* [Go](http://golang.org)
+* [goprotobuf](https://code.google.com/p/goprotobuf)
+
+## Ruby
+* [Ruby](https://www.ruby-lang.org)
+* [bundler](https://rubygems.org/gems/bundler)
+
+# Building
+ $ make
+
+# Getting Started
+ * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go).
+ * All of the core developers are accessible via the [Prometheus Developers Mailing List](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 000000000..b065f8683
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
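Editorial sketch, not part of the vendored file: because the generated types above are plain structs with pointer-valued fields, a MetricFamily can be assembled by hand (useful in tests), using the String/Float64 helpers from github.com/golang/protobuf/proto:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	// String() renders the compact text form via proto.CompactTextString.
	fmt.Println(mf.String())
}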
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 000000000..636a2c1a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md
new file mode 100644
index 000000000..98f6ce24b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/README.md
@@ -0,0 +1,12 @@
+# Common
+[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common)
+
+This repository contains Go libraries that are shared across Prometheus
+components and libraries.
+
+* **config**: Common configuration structures
+* **expfmt**: Decoding and encoding for the exposition format
+* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus)
+* **model**: Shared data structures
+* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`
+* **version**: Version information and metrics
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 000000000..a7a42d5ef
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
+ return err
+ }
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
+ for _, f := range fams {
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
+ }
+ return all, lastErr
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f), nil
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f), nil
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f), nil
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f), nil
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f), nil
+ }
+ return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
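Editorial sketch, not part of the vendored file: a minimal program tying this file's pieces together, decoding a text-format exposition into model samples via SampleDecoder (the input line and timestamp are illustrative):

package main

import (
	"io"
	"log"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	in := strings.NewReader("my_counter 42\n")
	dec := &expfmt.SampleDecoder{
		Dec: expfmt.NewDecoder(in, expfmt.FmtText),
		// The timestamp is applied to samples without an explicit one.
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}
	for {
		var v model.Vector
		if err := dec.Decode(&v); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		log.Println(v) // my_counter => 42 @ the supplied timestamp
	}
}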
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 000000000..11839ed65
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
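A serving-side sketch combining Negotiate and NewEncoder (editorial example; getFamilies is a hypothetical stand-in for however the caller collects *dto.MetricFamily values, e.g. from a registry, and is not an API of this package):

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// getFamilies is a hypothetical helper standing in for the caller's
// source of metric families.
func getFamilies() []*dto.MetricFamily { return nil }

func main() {
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// Pick the wire format the client asked for, defaulting to text.
		format := expfmt.Negotiate(r.Header)
		w.Header().Set("Content-Type", string(format))
		enc := expfmt.NewEncoder(w, format)
		for _, mf := range getFamilies() {
			if err := enc.Encode(mf); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	})
	http.ListenAndServe(":8080", nil)
}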
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 000000000..371ac7503
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire protocols.
+const (
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
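For reference, the assembled Content-Type values these constants produce, read off directly from the concatenations above (editorial example, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	fmt.Println(string(expfmt.FmtText))
	// text/plain; version=0.0.4
	fmt.Println(string(expfmt.FmtProtoDelim))
	// application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited
}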
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 000000000..dc2eedeef
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 000000000..f11321cd0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output has the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+var (
+ escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ if includeDoubleQuote {
+ return escapeWithDoubleQuote.Replace(v)
+ }
+
+ return escape.Replace(v)
+}
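
A minimal sketch of driving this encoder from client code, assuming the metric family is assembled by hand (the metric name and label below are invented for the example; real code would usually obtain the family from a Prometheus registry):

    package main

    import (
        "os"

        "github.com/golang/protobuf/proto"
        dto "github.com/prometheus/client_model/go"
        "github.com/prometheus/common/expfmt"
    )

    func main() {
        // A hand-built counter family with one labeled sample.
        mf := &dto.MetricFamily{
            Name: proto.String("http_requests_total"),
            Help: proto.String("Total number of HTTP requests."),
            Type: dto.MetricType_COUNTER.Enum(),
            Metric: []*dto.Metric{{
                Label: []*dto.LabelPair{{
                    Name:  proto.String("code"),
                    Value: proto.String("200"),
                }},
                Counter: &dto.Counter{Value: proto.Float64(1027)},
            }},
        }
        // Emits:
        //   # HELP http_requests_total Total number of HTTP requests.
        //   # TYPE http_requests_total counter
        //   http_requests_total{code="200"} 1027
        if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
            panic(err)
        }
    }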
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 000000000..ef9a15077
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends in '_count' or
+ // '_sum' respectively and belongs to a summary/histogram, representing the sample
+ // count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check if this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
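
To see the state machine above end to end, a small usage sketch (the exposition text is invented for the example):

    package main

    import (
        "fmt"
        "strings"

        "github.com/prometheus/common/expfmt"
    )

    func main() {
        in := "# HELP http_requests_total Total number of HTTP requests.\n" +
            "# TYPE http_requests_total counter\n" +
            "http_requests_total{code=\"200\"} 1027 1395066363000\n"
        // The zero value of TextParser is ready to use, per its doc comment.
        var parser expfmt.TextParser
        families, err := parser.TextToMetricFamilies(strings.NewReader(in))
        if err != nil {
            panic(err)
        }
        for name, mf := range families {
            fmt.Println(name, len(mf.Metric)) // http_requests_total 1
        }
    }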
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 000000000..7723656d5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+    Q float64
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 000000000..648b38cb6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
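
Note that this copy lives under internal/, so it is only importable from within prometheus/common itself; the sketch below assumes the original bitbucket.org/ww/goautoneg import path. Clauses default to q=1.0, so the JSON alternative wins here despite appearing later in the header:

    package main

    import (
        "fmt"

        "bitbucket.org/ww/goautoneg"
    )

    func main() {
        header := "text/html;q=0.9, application/json"
        ct := goautoneg.Negotiate(header, []string{"text/html", "application/json"})
        fmt.Println(ct) // application/json
    }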
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 000000000..35e739c7a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for the purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
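
A brief sketch of the lifecycle helpers above (the alert name and timing are invented for the example):

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/common/model"
    )

    func main() {
        a := &model.Alert{
            Labels:   model.LabelSet{model.AlertNameLabel: "HighErrorRate"},
            StartsAt: time.Now().Add(-5 * time.Minute),
        }
        if err := a.Validate(); err != nil {
            panic(err)
        }
        // EndsAt is zero, so the alert is still active.
        fmt.Println(a.Name(), a.Status()) // HighErrorRate firing
    }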
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 000000000..fc4de4106
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
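
A quick sketch of the set operations (the fingerprint values are arbitrary):

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
        b := model.FingerprintSet{2: {}, 3: {}, 4: {}}
        fmt.Println(len(a.Intersection(b))) // 2
        fmt.Println(a.Equal(b))             // false
        // Fingerprints round-trip through their 16-digit hex form.
        fp, _ := model.FingerprintFromString("0123456789abcdef")
        fmt.Println(fp) // 0123456789abcdef
    }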
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 000000000..038fc1c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
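
These helpers are unexported, so the following check only compiles inside the model package; under that assumption, it confirms the inlined variant agrees with the standard library's hash/fnv:

    package model

    import (
        "hash/fnv"
        "testing"
    )

    func TestHashAddMatchesStdlib(t *testing.T) {
        // hash/fnv's 64-bit FNV-1a uses the same offset and prime constants.
        want := fnv.New64a()
        want.Write([]byte("__name__"))
        if got := hashAdd(hashNew(), "__name__"); got != want.Sum64() {
            t.Fatalf("got %x, want %x", got, want.Sum64())
        }
    }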
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 000000000..41051a01a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check, but faster than a
+// match against this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
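
A short sketch of the validation and sorting behaviour defined above:

    package main

    import (
        "fmt"
        "sort"

        "github.com/prometheus/common/model"
    )

    func main() {
        fmt.Println(model.LabelName("job").IsValid())  // true
        fmt.Println(model.LabelName("0job").IsValid()) // false: may not start with a digit
        pairs := model.LabelPairs{
            {Name: "job", Value: "api"},
            {Name: "instance", Value: "a:9090"},
        }
        sort.Sort(pairs) // orders by name, then value
        fmt.Println(pairs[0].Name) // instance
    }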
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 000000000..6eda08a73
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. A LabelSet
+// may be fully qualified, in which case it resolves to a single Metric in the
+// data store, or only partially specified, in which case it may match a
+// vector of Metric entities.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the metrics, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
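A short usage sketch of Merge's non-destructive semantics: values in the argument win on conflicting names, and the receiver is left untouched (illustrative label values only):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "env": "prod"}
	override := model.LabelSet{"env": "staging"}

	merged := base.Merge(override)
	fmt.Println(merged) // {env="staging", job="api"}
	fmt.Println(base)   // {env="prod", job="api"}: unchanged
}
```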
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 000000000..f7250909b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,103 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ // MetricNameRE is a regular expression matching valid metric
+	// names. Note that the IsValidMetricName function performs the same
+	// check, but is faster than matching against this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := make(Metric, len(m))
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
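A small sketch of both validation paths and Metric's String format (the MetricNameLabel constant, "__name__", is defined elsewhere in this package; the metric names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
	}
	fmt.Println(m) // http_requests_total{method="GET"}

	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("1_leading_digit"))     // false: digits may not lead
}
```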
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 000000000..a7b969170
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 000000000..8762b13c6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
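Because the label names are sorted before hashing (hashNew/hashAdd/hashAddByte are presumably the FNV-1a helpers from this package's fnv.go, not shown in this hunk), the signature is independent of map iteration order. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s1 := model.LabelsToSignature(map[string]string{"a": "1", "b": "2"})
	s2 := model.LabelsToSignature(map[string]string{"b": "2", "a": "1"})
	fmt.Println(s1 == s2) // true: insertion order does not matter
}
```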
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster and less allocation-heavy hash function, which is more susceptible
+// to hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
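A sketch contrasting the two selective signatures: SignatureForLabels hashes only the listed names, while SignatureWithoutLabels hashes everything except the excluded set (labels here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{"__name__": "up", "job": "api", "instance": "host:9090"}

	onlyJob := model.SignatureForLabels(m, "job")
	noInstance := model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{
		"instance": {},
	})
	fmt.Println(onlyJob, noInstance) // two different, deterministic signatures
}
```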
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 000000000..7538e2997
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// A Matcher describes how the value of a given label is matched.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus ecosystem.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
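A minimal sketch of building a Silence that passes Validate; every field value below is illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	s := model.Silence{
		Matchers:  []*model.Matcher{{Name: "job", Value: "api"}},
		StartsAt:  now,
		EndsAt:    now.Add(2 * time.Hour),
		CreatedAt: now,
		CreatedBy: "ops@example.com",   // illustrative
		Comment:   "planned maintenance", // illustrative
	}
	fmt.Println(s.Validate()) // <nil>
}
```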
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 000000000..548968aeb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// MinimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the number of minimum ticks in one second.
+	second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
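A round-trip sketch showing the millisecond resolution and the fractional-second JSON form handled above (the timestamp is an arbitrary example):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prometheus/common/model"
)

func main() {
	var t model.Time
	if err := json.Unmarshal([]byte("1500000000.123"), &t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(int64(t))       // 1500000000123 (milliseconds since the epoch)
	fmt.Println(t)              // 1500000000.123
	fmt.Println(t.Time().UTC()) // 2017-07-14 02:40:00.123 +0000 UTC
}
```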
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
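A usage sketch: ParseDuration accepts a single integer plus unit, and String prints the value using the largest unit that divides it evenly:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("90m")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(time.Duration(d)) // 1h30m0s
	fmt.Println(d)                // 90m: not a whole number of hours

	d2, _ := model.ParseDuration("120m")
	fmt.Println(d2) // 2h: 120 minutes divides evenly into hours
}
```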
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 000000000..c9ed3ffd8
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,416 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which can appear in a real SamplePair and is thus not
+	// suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+ // ZeroSample is the pseudo zero-value of Sample used to signal a
+ // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+ // and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which can appear in a real Sample and is thus not suitable
+	// to signal a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
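A sketch of the NaN caveat: Go's == follows IEEE 754 (NaN is not equal to NaN), whereas Equal deliberately treats two NaNs as equal:

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	nan := model.SampleValue(math.NaN())
	fmt.Println(nan == nan)     // false: IEEE 754 comparison
	fmt.Println(nan.Equal(nan)) // true: two NaNs count as equal
}
```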
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
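The JSON wire format produced above is a two-element array, timestamp first and value as a quoted string; a quick sketch with an arbitrary sample:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prometheus/common/model"
)

func main() {
	p := model.SamplePair{Timestamp: 1500000000000, Value: 0.5}
	b, err := json.Marshal(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // [1500000000,"0.5"]
}
```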
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+
+ return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
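Scalar uses the same [timestamp, "value"] array encoding; a decoding sketch with illustrative input:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prometheus/common/model"
)

func main() {
	var s model.Scalar
	if err := json.Unmarshal([]byte(`[1500000000, "42"]`), &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // scalar: 42 @[1500000000]
}
```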
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is an alias for Samples with the added contract
+// that all Samples in a Vector have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
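A sketch of the sort contract shared by Vector and Matrix, ordering by metric first (label names and values below are arbitrary examples):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	vec := model.Vector{
		{Metric: model.Metric{"__name__": "up", "job": "b"}, Timestamp: 2, Value: 1},
		{Metric: model.Metric{"__name__": "up", "job": "a"}, Timestamp: 1, Value: 0},
	}
	sort.Sort(vec)
	fmt.Println(vec[0].Metric["job"], vec[1].Metric["job"]) // a b
}
```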
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 000000000..53c5e9aa1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 000000000..209549471
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,11 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 000000000..680a9842a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data comprises an array of free-fragment counts for each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+ Node string
+ Zone string
+ Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.Path("buddyinfo"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+ var (
+ buddyInfo = []BuddyInfo{}
+ scanner = bufio.NewScanner(r)
+ bucketCount = -1
+ )
+
+ for scanner.Scan() {
+ var err error
+ line := scanner.Text()
+ parts := strings.Fields(string(line))
+
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ }
+
+ node := strings.TrimRight(parts[1], ",")
+ zone := strings.TrimRight(parts[3], ",")
+ arraySize := len(parts[4:])
+
+ if bucketCount == -1 {
+ bucketCount = arraySize
+ } else {
+ if bucketCount != arraySize {
+ return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ }
+ }
+
+ sizes := make([]float64, arraySize)
+ for i := 0; i < arraySize; i++ {
+ sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+ }
+ }
+
+ buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+ }
+
+ return buddyInfo, scanner.Err()
+}
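A usage sketch (Linux-only; requires a readable /proc mount):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	infos, err := fs.NewBuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, bi := range infos {
		// Sizes[n] is the count of free fragments of size 2^n*PAGE_SIZE.
		fmt.Printf("node %s zone %-8s free fragments per order: %v\n", bi.Node, bi.Zone, bi.Sizes)
	}
}
```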
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 000000000..e2acd6d40
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 000000000..17546756b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,46 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/prometheus/procfs/xfs"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+ return path.Join(append([]string{string(fs)}, p...)...)
+}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+ f, err := os.Open(fs.Path("fs/xfs/stat"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return xfs.ParseStats(f)
+}
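Path simply joins the mount point with the given components, which is what lets tests point FS at a fixture tree instead of the real /proc; a quick sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err) // e.g. not on Linux, or /proc not mounted
	}
	fmt.Println(fs.Path("net", "ip_vs_stats")) // /proc/net/ip_vs_stats
}
```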
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 000000000..696d114e7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,246 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The local firewall mark
+ LocalMark string
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localMark string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(string(scanner.Text()))
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = ""
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "FWM":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = fields[1]
+ localAddress = nil
+ localPort = 0
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ LocalMark: localMark,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ var (
+ ip net.IP
+ err error
+ )
+
+ switch len(s) {
+ case 13:
+ ip, err = hex.DecodeString(s[0:8])
+ if err != nil {
+ return nil, 0, err
+ }
+ case 46:
+ ip = net.ParseIP(s[1:40])
+ if ip == nil {
+ return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ }
+ default:
+ return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+ }
+
+ portString := s[len(s)-4:]
+ if len(portString) != 4 {
+ return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+ }
+ port, err := strconv.ParseUint(portString, 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
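A usage sketch; note that /proc/net/ip_vs and /proc/net/ip_vs_stats exist only when the kernel's IPVS module is loaded, so the error path is expected on most machines:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	stats, err := procfs.NewIPVSStats()
	if err != nil {
		log.Fatal(err) // typically: no IPVS support on this host
	}
	fmt.Printf("connections: %d, in/out bytes: %d/%d\n",
		stats.Connections, stats.IncomingBytes, stats.OutgoingBytes)
}
```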
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 000000000..d7a248c0d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,138 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+	// Activity state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses the mdstat file and returns a slice of MDStat structs
+// containing the relevant info.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+ mdStatusFilePath := fs.Path("mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStates := []MDStat{}
+ lines := strings.Split(string(content), "\n")
+ for i, l := range lines {
+ if l == "" {
+ continue
+ }
+ if l[0] == ' ' {
+ continue
+ }
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ mdName := mainLine[0]
+ activityState := mainLine[2]
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf(
+ "error parsing %s: too few lines for md device %s",
+ mdStatusFilePath,
+ mdName,
+ )
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ // j is the line number of the syncing-line.
+ j := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ j = i + 3
+ }
+
+		// If the device is syncing at the moment, get the number of currently
+		// synced blocks; otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStates, nil
+}
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
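+
+// A minimal usage sketch, assuming /proc is mounted at the default location:
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		// handle error
+//	}
+//	mds, err := fs.ParseMDStat()
+//	for _, md := range mds {
+//		fmt.Printf("%s: %d/%d disks active\n", md.Name, md.DisksActive, md.DisksTotal)
+//	}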
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 000000000..6b2b0ba9d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,556 @@
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10Len = 10
+ fieldTransport11Len = 13
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+	// If available, additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+ // Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueTime time.Duration
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseTime time.Duration
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTime time.Duration
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+	// between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+// device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
+
+ switch ss[0] {
+ case fieldAge:
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = *tstats
+ }
+
+		// When encountering "per-operation statistics", we must break this
+		// loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; this is why the check is an
+		// 'if' statement rather than another switch case.
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Number of expected fields in each per-operation statistics set
+ numFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) != numFields {
+ return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ }
+
+ // Skip string operation name for integers
+ ns := make([]uint64, 0, numFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ ops = append(ops, NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
+ CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+ CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
+ })
+ }
+
+ return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ switch statVersion {
+ case statVersion10:
+ if len(ss) != fieldTransport10Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ }
+ case statVersion11:
+ if len(ss) != fieldTransport11Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ }
+
+ // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+ // in a v1.0 response.
+ //
+ // Note: slice length must be set to length of v1.1 stats to avoid a panic when
+ // only v1.0 stats are present.
+ // See: https://github.com/prometheus/node_exporter/issues/571.
+ ns := make([]uint64, fieldTransport11Len)
+ for i, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns[i] = n
+ }
+
+ return &NFSTransportStats{
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTime: time.Duration(ns[4]) * time.Second,
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+ }, nil
+}
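+
+// A minimal usage sketch: Stats is only populated when the mount carried
+// statistics, and a type assertion recovers the NFS details (p is assumed
+// to be a Proc obtained elsewhere):
+//
+//	mounts, err := p.MountStats()
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, m := range mounts {
+//		if nfs, ok := m.Stats.(*MountStatsNFS); ok {
+//			fmt.Println(m.Device, nfs.Age, nfs.Bytes.ReadTotal)
+//		}
+//	}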
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 000000000..8717e1fe0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,224 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
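+
+// A minimal usage sketch (PID 1 is just an example):
+//
+//	p, err := NewProc(1)
+//	if err != nil {
+//		// handle error
+//	}
+//	comm, _ := p.Comm()
+//	args, _ := p.CmdLine()
+//	fmt.Println(comm, args)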
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 000000000..b4e31d7ba
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
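+
+// A minimal usage sketch (p is assumed to be a Proc obtained elsewhere):
+//
+//	io, err := p.NewIO()
+//	if err == nil {
+//		fmt.Printf("read %d bytes, wrote %d bytes\n", io.ReadBytes, io.WriteBytes)
+//	}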
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 000000000..2df997ce1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime int
+ // Maximum size of files that the process may create.
+ FileSize int
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize int
+ // Maximum size of the process stack in bytes.
+ StackSize int
+ // Maximum size of a core file.
+ CoreFileSize int
+ // Limit of the process's resident set in pages.
+ ResidentSet int
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes int
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles int
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory int
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace int
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks int
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals int
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize int
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority int
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority int
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
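+
+// A minimal usage sketch (p is assumed to be a Proc obtained elsewhere);
+// note that parseInt reports "unlimited" values as -1:
+//
+//	limits, err := p.NewLimits()
+//	if err == nil && limits.OpenFiles != -1 {
+//		fmt.Println("open file limit:", limits.OpenFiles)
+//	}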
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 000000000..724e271b9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := os.Open(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
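+
+// A minimal usage sketch (p is assumed to be a Proc obtained elsewhere):
+//
+//	stat, err := p.NewStat()
+//	if err == nil {
+//		fmt.Printf("%s: %.2fs of CPU time, %d bytes resident\n",
+//			stat.Comm, stat.CPUTime(), stat.ResidentMemory())
+//	}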
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 000000000..1ca217e8c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,56 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := os.Open(fs.Path("stat"))
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
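+
+// A minimal usage sketch:
+//
+//	s, err := NewStat()
+//	if err == nil {
+//		fmt.Println("booted at unix time", s.BootTime)
+//	}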
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 000000000..ffe9df50d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by other
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+	// Transformation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+	// i.e. sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+	// Policy discards
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ XfrmFwdHdrError int
+ XfrmOutStateInvalid int
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf(
+				"couldn't parse %s line %s", file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
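+
+// A minimal usage sketch:
+//
+//	x, err := NewXfrmStat()
+//	if err == nil && x.XfrmInError > 0 {
+//		fmt.Println("inbound xfrm errors:", x.XfrmInError)
+//	}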
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 000000000..c8f6279f3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,359 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
+func ParseStats(r io.Reader) (*Stats, error) {
+ const (
+ // Fields parsed into stats structures.
+ fieldExtentAlloc = "extent_alloc"
+ fieldAbt = "abt"
+ fieldBlkMap = "blk_map"
+ fieldBmbt = "bmbt"
+ fieldDir = "dir"
+ fieldTrans = "trans"
+ fieldIg = "ig"
+ fieldLog = "log"
+ fieldRw = "rw"
+ fieldAttr = "attr"
+ fieldIcluster = "icluster"
+ fieldVnodes = "vnodes"
+ fieldBuf = "buf"
+ fieldXpc = "xpc"
+
+ // Unimplemented at this time due to lack of documentation.
+ fieldPushAil = "push_ail"
+ fieldXstrat = "xstrat"
+ fieldAbtb2 = "abtb2"
+ fieldAbtc2 = "abtc2"
+ fieldBmbt2 = "bmbt2"
+ fieldIbt2 = "ibt2"
+ fieldFibt2 = "fibt2"
+ fieldQm = "qm"
+ fieldDebug = "debug"
+ )
+
+ var xfss Stats
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Expect at least a string label and a single integer value, ex:
+ // - abt 0
+ // - rw 1 2
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) < 2 {
+ continue
+ }
+ label := ss[0]
+
+ // Extended precision counters are uint64 values.
+ if label == fieldXpc {
+ us, err := parseUint64s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
+ if err != nil {
+ return nil, err
+ }
+
+ continue
+ }
+
+ // All other counters are uint32 values.
+ us, err := parseUint32s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ switch label {
+ case fieldExtentAlloc:
+ xfss.ExtentAllocation, err = extentAllocationStats(us)
+ case fieldAbt:
+ xfss.AllocationBTree, err = btreeStats(us)
+ case fieldBlkMap:
+ xfss.BlockMapping, err = blockMappingStats(us)
+ case fieldBmbt:
+ xfss.BlockMapBTree, err = btreeStats(us)
+ case fieldDir:
+ xfss.DirectoryOperation, err = directoryOperationStats(us)
+ case fieldTrans:
+ xfss.Transaction, err = transactionStats(us)
+ case fieldIg:
+ xfss.InodeOperation, err = inodeOperationStats(us)
+ case fieldLog:
+ xfss.LogOperation, err = logOperationStats(us)
+ case fieldRw:
+ xfss.ReadWrite, err = readWriteStats(us)
+ case fieldAttr:
+ xfss.AttributeOperation, err = attributeOperationStats(us)
+ case fieldIcluster:
+ xfss.InodeClustering, err = inodeClusteringStats(us)
+ case fieldVnodes:
+ xfss.Vnode, err = vnodeStats(us)
+ case fieldBuf:
+ xfss.Buffer, err = bufferStats(us)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &xfss, s.Err()
+}
+
+// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
+ if l := len(us); l != 4 {
+ return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
+ }
+
+ return ExtentAllocationStats{
+ ExtentsAllocated: us[0],
+ BlocksAllocated: us[1],
+ ExtentsFreed: us[2],
+ BlocksFreed: us[3],
+ }, nil
+}
+
+// btreeStats builds a BTreeStats from a slice of uint32s.
+func btreeStats(us []uint32) (BTreeStats, error) {
+ if l := len(us); l != 4 {
+ return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
+ }
+
+ return BTreeStats{
+ Lookups: us[0],
+ Compares: us[1],
+ RecordsInserted: us[2],
+ RecordsDeleted: us[3],
+ }, nil
+}
+
+// blockMappingStats builds a BlockMappingStats from a slice of uint32s.
+func blockMappingStats(us []uint32) (BlockMappingStats, error) {
+ if l := len(us); l != 7 {
+ return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
+ }
+
+ return BlockMappingStats{
+ Reads: us[0],
+ Writes: us[1],
+ Unmaps: us[2],
+ ExtentListInsertions: us[3],
+ ExtentListDeletions: us[4],
+ ExtentListLookups: us[5],
+ ExtentListCompares: us[6],
+ }, nil
+}
+
+// directoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
+func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
+ if l := len(us); l != 4 {
+ return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
+ }
+
+ return DirectoryOperationStats{
+ Lookups: us[0],
+ Creates: us[1],
+ Removes: us[2],
+ Getdents: us[3],
+ }, nil
+}
+
+// transactionStats builds a TransactionStats from a slice of uint32s.
+func transactionStats(us []uint32) (TransactionStats, error) {
+ if l := len(us); l != 3 {
+ return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
+ }
+
+ return TransactionStats{
+ Sync: us[0],
+ Async: us[1],
+ Empty: us[2],
+ }, nil
+}
+
+// inodeOperationStats builds an InodeOperationStats from a slice of uint32s.
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+ if l := len(us); l != 7 {
+ return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+ }
+
+ return InodeOperationStats{
+ Attempts: us[0],
+ Found: us[1],
+ Recycle: us[2],
+ Missed: us[3],
+ Duplicate: us[4],
+ Reclaims: us[5],
+ AttributeChange: us[6],
+ }, nil
+}
+
+// logOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+ if l := len(us); l != 5 {
+ return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+ }
+
+ return LogOperationStats{
+ Writes: us[0],
+ Blocks: us[1],
+ NoInternalBuffers: us[2],
+ Force: us[3],
+ ForceSleep: us[4],
+ }, nil
+}
+
+// readWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+ if l := len(us); l != 2 {
+ return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+ }
+
+ return ReadWriteStats{
+ Read: us[0],
+ Write: us[1],
+ }, nil
+}
+
+// attributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+ if l := len(us); l != 4 {
+ return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+ }
+
+ return AttributeOperationStats{
+ Get: us[0],
+ Set: us[1],
+ Remove: us[2],
+ List: us[3],
+ }, nil
+}
+
+// inodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+ if l := len(us); l != 3 {
+ return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+ }
+
+ return InodeClusteringStats{
+ Iflush: us[0],
+ Flush: us[1],
+ FlushInode: us[2],
+ }, nil
+}
+
+// vnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+ // The attribute "Free" appears to not be available on older XFS
+ // stats versions. Therefore, 7 or 8 elements may appear in
+ // this slice.
+ l := len(us)
+ if l != 7 && l != 8 {
+ return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+ }
+
+ s := VnodeStats{
+ Active: us[0],
+ Allocate: us[1],
+ Get: us[2],
+ Hold: us[3],
+ Release: us[4],
+ Reclaim: us[5],
+ Remove: us[6],
+ }
+
+ // Skip adding free, unless it is present. The zero value will
+ // be used in place of an actual count.
+ if l == 7 {
+ return s, nil
+ }
+
+ s.Free = us[7]
+ return s, nil
+}
+
+// bufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+ if l := len(us); l != 9 {
+ return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+ }
+
+ return BufferStats{
+ Get: us[0],
+ Create: us[1],
+ GetLocked: us[2],
+ GetLockedWaited: us[3],
+ BusyLocked: us[4],
+ MissLocked: us[5],
+ PageRetries: us[6],
+ PageFound: us[7],
+ GetRead: us[8],
+ }, nil
+}
+
+// extendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
+ if l := len(us); l != 3 {
+ return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
+ }
+
+ return ExtendedPrecisionStats{
+ FlushBytes: us[0],
+ WriteBytes: us[1],
+ ReadBytes: us[2],
+ }, nil
+}
+
+// parseUint32s parses a slice of strings into a slice of uint32s.
+func parseUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
+
+// parseUint64s parses a slice of strings into a slice of uint64s.
+func parseUint64s(ss []string) ([]uint64, error) {
+ us := make([]uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, u)
+ }
+
+ return us, nil
+}
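+
+// A minimal usage sketch, feeding ParseStats a fixed sample instead of the
+// live /proc/fs/xfs/stat file:
+//
+//	stats, err := ParseStats(strings.NewReader("extent_alloc 1 2 3 4\nrw 5 6\n"))
+//	if err == nil {
+//		fmt.Println(stats.ReadWrite.Read, stats.ReadWrite.Write) // 5 6
+//	}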
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
new file mode 100644
index 000000000..d86794b7c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package xfs provides access to statistics exposed by the XFS filesystem.
+package xfs
+
+// Stats contains XFS filesystem runtime statistics, parsed from
+// /proc/fs/xfs/stat.
+//
+// The names and meanings of each statistic were taken from
+// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
+// kernel source. Most counters are uint32s (same data types used in
+// xfs_stats.h), but some of the "extended precision stats" are uint64s.
+type Stats struct {
+ // The name of the filesystem used to source these statistics.
+ // If empty, this indicates aggregated statistics for all XFS
+ // filesystems on the host.
+ Name string
+
+ ExtentAllocation ExtentAllocationStats
+ AllocationBTree BTreeStats
+ BlockMapping BlockMappingStats
+ BlockMapBTree BTreeStats
+ DirectoryOperation DirectoryOperationStats
+ Transaction TransactionStats
+ InodeOperation InodeOperationStats
+ LogOperation LogOperationStats
+ ReadWrite ReadWriteStats
+ AttributeOperation AttributeOperationStats
+ InodeClustering InodeClusteringStats
+ Vnode VnodeStats
+ Buffer BufferStats
+ ExtendedPrecision ExtendedPrecisionStats
+}
+
+// ExtentAllocationStats contains statistics regarding XFS extent allocations.
+type ExtentAllocationStats struct {
+ ExtentsAllocated uint32
+ BlocksAllocated uint32
+ ExtentsFreed uint32
+ BlocksFreed uint32
+}
+
+// BTreeStats contains statistics regarding an XFS internal B-tree.
+type BTreeStats struct {
+ Lookups uint32
+ Compares uint32
+ RecordsInserted uint32
+ RecordsDeleted uint32
+}
+
+// BlockMappingStats contains statistics regarding XFS block maps.
+type BlockMappingStats struct {
+ Reads uint32
+ Writes uint32
+ Unmaps uint32
+ ExtentListInsertions uint32
+ ExtentListDeletions uint32
+ ExtentListLookups uint32
+ ExtentListCompares uint32
+}
+
+// DirectoryOperationStats contains statistics regarding XFS directory entries.
+type DirectoryOperationStats struct {
+ Lookups uint32
+ Creates uint32
+ Removes uint32
+ Getdents uint32
+}
+
+// TransactionStats contains statistics regarding XFS metadata transactions.
+type TransactionStats struct {
+ Sync uint32
+ Async uint32
+ Empty uint32
+}
+
+// InodeOperationStats contains statistics regarding XFS inode operations.
+type InodeOperationStats struct {
+ Attempts uint32
+ Found uint32
+ Recycle uint32
+ Missed uint32
+ Duplicate uint32
+ Reclaims uint32
+ AttributeChange uint32
+}
+
+// LogOperationStats contains statistics regarding the XFS log buffer.
+type LogOperationStats struct {
+ Writes uint32
+ Blocks uint32
+ NoInternalBuffers uint32
+ Force uint32
+ ForceSleep uint32
+}
+
+// ReadWriteStats contains statistics regarding the number of read and write
+// system calls for XFS filesystems.
+type ReadWriteStats struct {
+ Read uint32
+ Write uint32
+}
+
+// AttributeOperationStats contains statistics regarding manipulation of
+// XFS extended file attributes.
+type AttributeOperationStats struct {
+ Get uint32
+ Set uint32
+ Remove uint32
+ List uint32
+}
+
+// InodeClusteringStats contains statistics regarding XFS inode clustering
+// operations.
+type InodeClusteringStats struct {
+ Iflush uint32
+ Flush uint32
+ FlushInode uint32
+}
+
+// VnodeStats contains statistics regarding XFS vnode operations.
+type VnodeStats struct {
+ Active uint32
+ Allocate uint32
+ Get uint32
+ Hold uint32
+ Release uint32
+ Reclaim uint32
+ Remove uint32
+ Free uint32
+}
+
+// BufferStats contains statistics regarding XFS read/write I/O buffers.
+type BufferStats struct {
+ Get uint32
+ Create uint32
+ GetLocked uint32
+ GetLockedWaited uint32
+ BusyLocked uint32
+ MissLocked uint32
+ PageRetries uint32
+ PageFound uint32
+ GetRead uint32
+}
+
+// ExtendedPrecisionStats contains high precision counters used to track the
+// total number of bytes read, written, or flushed, during XFS operations.
+type ExtendedPrecisionStats struct {
+ FlushBytes uint64
+ WriteBytes uint64
+ ReadBytes uint64
+}
diff --git a/vendor/github.com/renstrom/dedent/LICENSE b/vendor/github.com/renstrom/dedent/LICENSE
new file mode 100644
index 000000000..66a9870fc
--- /dev/null
+++ b/vendor/github.com/renstrom/dedent/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Peter Renström
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/renstrom/dedent/README.md b/vendor/github.com/renstrom/dedent/README.md
new file mode 100644
index 000000000..35b5aa134
--- /dev/null
+++ b/vendor/github.com/renstrom/dedent/README.md
@@ -0,0 +1,50 @@
+# Dedent
+
+[![Build Status](https://travis-ci.org/renstrom/dedent.svg?branch=master)](https://travis-ci.org/renstrom/dedent)
+[![Godoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/renstrom/dedent)
+
+Removes common leading whitespace from multiline strings. Inspired by [`textwrap.dedent`](https://docs.python.org/3/library/textwrap.html#textwrap.dedent) in Python.
+
+## Usage / example
+
+Imagine the following snippet that prints a multiline string. You want the indentation to look nice both in the code and in the actual output.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/renstrom/dedent"
+)
+
+func main() {
+ s := `Lorem ipsum dolor sit amet,
+ consectetur adipiscing elit.
+ Curabitur justo tellus, facilisis nec efficitur dictum,
+ fermentum vitae ligula. Sed eu convallis sapien.`
+ fmt.Println(dedent.Dedent(s))
+ fmt.Println("-------------")
+ fmt.Println(s)
+}
+```
+
+To illustrate the difference, here's the output:
+
+```bash
+$ go run main.go
+Lorem ipsum dolor sit amet,
+consectetur adipiscing elit.
+Curabitur justo tellus, facilisis nec efficitur dictum,
+fermentum vitae ligula. Sed eu convallis sapien.
+-------------
+Lorem ipsum dolor sit amet,
+ consectetur adipiscing elit.
+ Curabitur justo tellus, facilisis nec efficitur dictum,
+ fermentum vitae ligula. Sed eu convallis sapien.
+```
+
+## License
+
+MIT
diff --git a/vendor/github.com/renstrom/dedent/dedent.go b/vendor/github.com/renstrom/dedent/dedent.go
new file mode 100644
index 000000000..f58dc47d5
--- /dev/null
+++ b/vendor/github.com/renstrom/dedent/dedent.go
@@ -0,0 +1,56 @@
+package dedent
+
+import (
+ "regexp"
+ "strings"
+)
+
+var whitespaceOnly = regexp.MustCompile("(?m)^[ \t]+$")
+var leadingWhitespace = regexp.MustCompile("(?m)(^[ \t]*)")
+
+// Dedent removes any common leading whitespace from every line in s.
+//
+// This can be used to make multiline strings to line up with the left edge of
+// the display, while still presenting them in the source code in indented
+// form.
+func Dedent(s string) string {
+ s = whitespaceOnly.ReplaceAllString(s, "")
+ margin := findMargin(s)
+ if len(margin) == 0 {
+ return s
+ }
+ return regexp.MustCompile("(?m)^"+margin).ReplaceAllString(s, "")
+}
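+
+// A quick illustration of the behaviour (assuming tab-indented input):
+//
+//	Dedent("\tfoo\n\tbar\n") // returns "foo\nbar\n"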
+
+// Look for the longest leading string of spaces and tabs common to all lines.
+func findMargin(s string) string {
+ var margin string
+
+ indents := leadingWhitespace.FindAllString(s, -1)
+ numIndents := len(indents)
+ for i, indent := range indents {
+ // Don't use last row if it is empty
+ if i == numIndents-1 && indent == "" {
+ break
+ }
+
+ if margin == "" {
+ margin = indent
+ } else if strings.HasPrefix(indent, margin) {
+ // Current line more deeply indented than previous winner:
+ // no change (previous winner is still on top).
+ continue
+ } else if strings.HasPrefix(margin, indent) {
+ // Current line consistent with and no deeper than previous winner:
+ // it's the new winner.
+ margin = indent
+ } else {
+ // Current line and previous winner have no common whitespace:
+ // there is no margin.
+ margin = ""
+ break
+ }
+ }
+
+ return margin
+}
diff --git a/vendor/github.com/seccomp/libseccomp-golang/LICENSE b/vendor/github.com/seccomp/libseccomp-golang/LICENSE
new file mode 100644
index 000000000..81cf60de2
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2015 Matthew Heon <mheon@redhat.com>
+Copyright (c) 2015 Paul Moore <pmoore@redhat.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/seccomp/libseccomp-golang/README b/vendor/github.com/seccomp/libseccomp-golang/README
new file mode 100644
index 000000000..66839a466
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/README
@@ -0,0 +1,51 @@
+libseccomp-golang: Go Language Bindings for the libseccomp Project
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+https://github.com/seccomp/libseccomp
+
+The libseccomp library provides an easy to use, platform independent, interface
+to the Linux Kernel's syscall filtering mechanism. The libseccomp API is
+designed to abstract away the underlying BPF based syscall filter language and
+present a more conventional function-call based filtering interface that should
+be familiar to, and easily adopted by, application developers.
+
+The libseccomp-golang library provides a Go based interface to the libseccomp
+library.
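+
+As a brief, illustrative sketch of the Go API (see the godoc reference below
+for the authoritative documentation), a filter can be created, populated,
+and loaded as follows:
+
+ filter, _ := seccomp.NewFilter(seccomp.ActAllow)
+ call, _ := seccomp.GetSyscallFromName("ptrace")
+ filter.AddRule(call, seccomp.ActErrno.SetReturnCode(1))
+ filter.Load()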
+
+* Online Resources
+
+The library source repository currently lives on GitHub at the following URLs:
+
+ -> https://github.com/seccomp/libseccomp-golang
+ -> https://github.com/seccomp/libseccomp
+
+The project mailing list is currently hosted on Google Groups at the URL below,
+please note that a Google account is not required to subscribe to the mailing
+list.
+
+ -> https://groups.google.com/d/forum/libseccomp
+
+Documentation is also available at:
+
+ -> https://godoc.org/github.com/seccomp/libseccomp-golang
+
+* Installing the package
+
+The libseccomp-golang bindings require at least Go v1.2.1 and GCC v4.8.4;
+earlier versions may yield unpredictable results. If you meet these
+requirements you can install this package using the command below:
+
+ $ go get github.com/seccomp/libseccomp-golang
+
+* Testing the Library
+
+A number of tests and lint related recipes are provided in the Makefile. If
+you want to run the standard regression tests, you can execute the following:
+
+ $ make check
+
+In order to execute the 'make lint' recipe, the 'golint' tool is needed; it
+can be found at:
+
+ -> https://github.com/golang/lint
+
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
new file mode 100644
index 000000000..53bcb024d
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -0,0 +1,902 @@
+// +build linux
+
+// Public API specification for libseccomp Go bindings
+// Contains public API for the bindings
+
+// Package seccomp provides bindings for libseccomp, a library wrapping the Linux
+// seccomp syscall. Seccomp enables an application to restrict system call use
+// for itself and its children.
+package seccomp
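+
+// A minimal usage sketch (illustrative only; error handling elided) of a
+// whitelist-style filter:
+//
+//	filter, _ := NewFilter(ActErrno) // deny by default with an errno return
+//	defer filter.Release()
+//	call, _ := GetSyscallFromName("getpid")
+//	_ = filter.AddRule(call, ActAllow)
+//	_ = filter.Load()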
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// C wrapping code
+
+// #cgo pkg-config: libseccomp
+// #include <stdlib.h>
+// #include <seccomp.h>
+import "C"
+
+// Exported types
+
+// VersionError denotes that the system libseccomp version is incompatible
+// with this package.
+type VersionError struct {
+ message string
+ minimum string
+}
+
+func (e VersionError) Error() string {
+ format := "Libseccomp version too low: "
+ if e.message != "" {
+ format += e.message + ": "
+ }
+ format += "minimum supported is "
+ if e.minimum != "" {
+ format += e.minimum + ": "
+ } else {
+ format += "2.1.0: "
+ }
+ format += "detected %d.%d.%d"
+ return fmt.Sprintf(format, verMajor, verMinor, verMicro)
+}
+
+// ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a
+// per-architecture basis.
+type ScmpArch uint
+
+// ScmpAction represents an action to be taken on a filter rule match in
+// libseccomp
+type ScmpAction uint
+
+// ScmpCompareOp represents a comparison operator which can be used in a filter
+// rule
+type ScmpCompareOp uint
+
+// ScmpCondition represents a rule in a libseccomp filter context
+type ScmpCondition struct {
+ Argument uint `json:"argument,omitempty"`
+ Op ScmpCompareOp `json:"operator,omitempty"`
+ Operand1 uint64 `json:"operand_one,omitempty"`
+ Operand2 uint64 `json:"operand_two,omitempty"`
+}
+
+// ScmpSyscall represents a Linux System Call
+type ScmpSyscall int32
+
+// Exported Constants
+
+const (
+ // Valid architectures recognized by libseccomp
+ // ARM64 and all MIPS architectures are unsupported by versions of the
+ // library before v2.2 and will return errors if used
+
+ // ArchInvalid is a placeholder to ensure uninitialized ScmpArch
+ // variables are invalid
+ ArchInvalid ScmpArch = iota
+ // ArchNative is the native architecture of the kernel
+ ArchNative ScmpArch = iota
+ // ArchX86 represents 32-bit x86 syscalls
+ ArchX86 ScmpArch = iota
+ // ArchAMD64 represents 64-bit x86-64 syscalls
+ ArchAMD64 ScmpArch = iota
+ // ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers)
+ ArchX32 ScmpArch = iota
+ // ArchARM represents 32-bit ARM syscalls
+ ArchARM ScmpArch = iota
+ // ArchARM64 represents 64-bit ARM syscalls
+ ArchARM64 ScmpArch = iota
+ // ArchMIPS represents 32-bit MIPS syscalls
+ ArchMIPS ScmpArch = iota
+ // ArchMIPS64 represents 64-bit MIPS syscalls
+ ArchMIPS64 ScmpArch = iota
+ // ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers)
+ ArchMIPS64N32 ScmpArch = iota
+ // ArchMIPSEL represents 32-bit MIPS syscalls (little endian)
+ ArchMIPSEL ScmpArch = iota
+ // ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian)
+ ArchMIPSEL64 ScmpArch = iota
+ // ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian,
+ // 32-bit pointers)
+ ArchMIPSEL64N32 ScmpArch = iota
+ // ArchPPC represents 32-bit POWERPC syscalls
+ ArchPPC ScmpArch = iota
+ // ArchPPC64 represents 64-bit POWER syscalls (big endian)
+ ArchPPC64 ScmpArch = iota
+ // ArchPPC64LE represents 64-bit POWER syscalls (little endian)
+ ArchPPC64LE ScmpArch = iota
+ // ArchS390 represents 31-bit System z/390 syscalls
+ ArchS390 ScmpArch = iota
+ // ArchS390X represents 64-bit System z/390 syscalls
+ ArchS390X ScmpArch = iota
+)
+
+const (
+ // Supported actions on filter match
+
+ // ActInvalid is a placeholder to ensure uninitialized ScmpAction
+ // variables are invalid
+ ActInvalid ScmpAction = iota
+ // ActKill kills the process
+ ActKill ScmpAction = iota
+ // ActTrap throws SIGSYS
+ ActTrap ScmpAction = iota
+ // ActErrno causes the syscall to return a negative error code. This
+ // code can be set with the SetReturnCode method
+ ActErrno ScmpAction = iota
+ // ActTrace causes the syscall to notify tracing processes with the
+ // given error code. This code can be set with the SetReturnCode method
+ ActTrace ScmpAction = iota
+ // ActAllow permits the syscall to continue execution
+ ActAllow ScmpAction = iota
+)
+
+const (
+ // These are comparison operators used in conditional seccomp rules
+ // They are used to compare the value of a single argument of a syscall
+ // against a user-defined constant
+
+ // CompareInvalid is a placeholder to ensure uninitialized ScmpCompareOp
+ // variables are invalid
+ CompareInvalid ScmpCompareOp = iota
+ // CompareNotEqual returns true if the argument is not equal to the
+ // given value
+ CompareNotEqual ScmpCompareOp = iota
+ // CompareLess returns true if the argument is less than the given value
+ CompareLess ScmpCompareOp = iota
+ // CompareLessOrEqual returns true if the argument is less than or equal
+ // to the given value
+ CompareLessOrEqual ScmpCompareOp = iota
+ // CompareEqual returns true if the argument is equal to the given value
+ CompareEqual ScmpCompareOp = iota
+ // CompareGreaterEqual returns true if the argument is greater than or
+ // equal to the given value
+ CompareGreaterEqual ScmpCompareOp = iota
+ // CompareGreater returns true if the argument is greater than the given
+ // value
+ CompareGreater ScmpCompareOp = iota
+ // CompareMaskedEqual returns true if the argument is equal to the given
+ // value, when masked (bitwise &) against the second given value
+ CompareMaskedEqual ScmpCompareOp = iota
+)
+
+// Helpers for types
+
+// GetArchFromString returns an ScmpArch constant from a string representing an
+// architecture
+func GetArchFromString(arch string) (ScmpArch, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return ArchInvalid, err
+ }
+
+ switch strings.ToLower(arch) {
+ case "x86":
+ return ArchX86, nil
+ case "amd64", "x86-64", "x86_64", "x64":
+ return ArchAMD64, nil
+ case "x32":
+ return ArchX32, nil
+ case "arm":
+ return ArchARM, nil
+ case "arm64", "aarch64":
+ return ArchARM64, nil
+ case "mips":
+ return ArchMIPS, nil
+ case "mips64":
+ return ArchMIPS64, nil
+ case "mips64n32":
+ return ArchMIPS64N32, nil
+ case "mipsel":
+ return ArchMIPSEL, nil
+ case "mipsel64":
+ return ArchMIPSEL64, nil
+ case "mipsel64n32":
+ return ArchMIPSEL64N32, nil
+ case "ppc":
+ return ArchPPC, nil
+ case "ppc64":
+ return ArchPPC64, nil
+ case "ppc64le":
+ return ArchPPC64LE, nil
+ case "s390":
+ return ArchS390, nil
+ case "s390x":
+ return ArchS390X, nil
+ default:
+ return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %s", arch)
+ }
+}
+
+// String returns a string representation of an architecture constant
+func (a ScmpArch) String() string {
+ switch a {
+ case ArchX86:
+ return "x86"
+ case ArchAMD64:
+ return "amd64"
+ case ArchX32:
+ return "x32"
+ case ArchARM:
+ return "arm"
+ case ArchARM64:
+ return "arm64"
+ case ArchMIPS:
+ return "mips"
+ case ArchMIPS64:
+ return "mips64"
+ case ArchMIPS64N32:
+ return "mips64n32"
+ case ArchMIPSEL:
+ return "mipsel"
+ case ArchMIPSEL64:
+ return "mipsel64"
+ case ArchMIPSEL64N32:
+ return "mipsel64n32"
+ case ArchPPC:
+ return "ppc"
+ case ArchPPC64:
+ return "ppc64"
+ case ArchPPC64LE:
+ return "ppc64le"
+ case ArchS390:
+ return "s390"
+ case ArchS390X:
+ return "s390x"
+ case ArchNative:
+ return "native"
+ case ArchInvalid:
+ return "Invalid architecture"
+ default:
+ return "Unknown architecture"
+ }
+}
+
+// String returns a string representation of a comparison operator constant
+func (a ScmpCompareOp) String() string {
+ switch a {
+ case CompareNotEqual:
+ return "Not equal"
+ case CompareLess:
+ return "Less than"
+ case CompareLessOrEqual:
+ return "Less than or equal to"
+ case CompareEqual:
+ return "Equal"
+ case CompareGreaterEqual:
+ return "Greater than or equal to"
+ case CompareGreater:
+ return "Greater than"
+ case CompareMaskedEqual:
+ return "Masked equality"
+ case CompareInvalid:
+ return "Invalid comparison operator"
+ default:
+ return "Unrecognized comparison operator"
+ }
+}
+
+// String returns a string representation of a seccomp match action
+func (a ScmpAction) String() string {
+ switch a & 0xFFFF {
+ case ActKill:
+ return "Action: Kill Process"
+ case ActTrap:
+ return "Action: Send SIGSYS"
+ case ActErrno:
+ return fmt.Sprintf("Action: Return error code %d", (a >> 16))
+ case ActTrace:
+ return fmt.Sprintf("Action: Notify tracing processes with code %d",
+ (a >> 16))
+ case ActAllow:
+ return "Action: Allow system call"
+ default:
+ return "Unrecognized Action"
+ }
+}
+
+// SetReturnCode adds a return code to a supporting ScmpAction, clearing any
+// existing code. Only valid on ActErrno and ActTrace; takes no action otherwise.
+// Accepts a 16-bit return code as argument.
+// Returns a valid ScmpAction of the original type with the new return code set.
+func (a ScmpAction) SetReturnCode(code int16) ScmpAction {
+ aTmp := a & 0x0000FFFF
+ if aTmp == ActErrno || aTmp == ActTrace {
+ return (aTmp | (ScmpAction(code)&0xFFFF)<<16)
+ }
+ return a
+}
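+
+// For example (illustrative), ActErrno.SetReturnCode(int16(syscall.EPERM))
+// yields an action that fails matching syscalls with EPERM.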
+
+// GetReturnCode returns the return code of an ScmpAction
+func (a ScmpAction) GetReturnCode() int16 {
+ return int16(a >> 16)
+}
+
+// General utility functions
+
+// GetLibraryVersion returns the version of the library the bindings are built
+// against.
+// The version is formatted as follows: Major.Minor.Micro
+func GetLibraryVersion() (major, minor, micro int) {
+ return verMajor, verMinor, verMicro
+}
+
+// Syscall functions
+
+// GetName retrieves the name of a syscall from its number.
+// Acts on any syscall number.
+// Returns either a string containing the name of the syscall, or an error.
+func (s ScmpSyscall) GetName() (string, error) {
+ return s.GetNameByArch(ArchNative)
+}
+
+// GetNameByArch retrieves the name of a syscall from its number for a given
+// architecture.
+// Acts on any syscall number.
+// Accepts a valid architecture constant.
+// Returns either a string containing the name of the syscall, or an error
+// if the syscall is unrecognized or an issue occurred.
+func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) {
+ if err := sanitizeArch(arch); err != nil {
+ return "", err
+ }
+
+ cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s))
+ if cString == nil {
+ return "", fmt.Errorf("could not resolve syscall name")
+ }
+ defer C.free(unsafe.Pointer(cString))
+
+ finalStr := C.GoString(cString)
+ return finalStr, nil
+}
+
+// GetSyscallFromName returns the number of a syscall by name on the kernel's
+// native architecture.
+// Accepts a string containing the name of a syscall.
+// Returns the number of the syscall, or an error if no syscall with that name
+// was found.
+func GetSyscallFromName(name string) (ScmpSyscall, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return 0, err
+ }
+
+ cString := C.CString(name)
+ defer C.free(unsafe.Pointer(cString))
+
+ result := C.seccomp_syscall_resolve_name(cString)
+ if result == scmpError {
+ return 0, fmt.Errorf("could not resolve name to syscall")
+ }
+
+ return ScmpSyscall(result), nil
+}
+
+// GetSyscallFromNameByArch returns the number of a syscall by name for a given
+// architecture's ABI.
+// Accepts the name of a syscall and an architecture constant.
+// Returns the number of the syscall, or an error if an invalid architecture is
+// passed or a syscall with that name was not found.
+func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return 0, err
+ }
+ if err := sanitizeArch(arch); err != nil {
+ return 0, err
+ }
+
+ cString := C.CString(name)
+ defer C.free(unsafe.Pointer(cString))
+
+ result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString)
+ if result == scmpError {
+ return 0, fmt.Errorf("could not resolve name to syscall")
+ }
+
+ return ScmpSyscall(result), nil
+}
+
+// MakeCondition creates and returns a new condition to attach to a filter rule.
+// Associated rules will only match if this condition is true.
+// Accepts the number of the argument we are checking, and a comparison operator
+// and value to compare to.
+// The rule will match if argument $arg (zero-indexed) of the syscall is
+// $COMPARE_OP the provided comparison value.
+// Some comparison operators accept two values. Masked equals, for example,
+// will mask $arg of the syscall with the second value provided (via bitwise
+// AND) and then compare against the first value provided.
+// For example, in the less than or equal case, if the syscall argument was
+// 0 and the value provided was 1, the condition would match, as 0 is less
+// than or equal to 1.
+// Returns either an error on bad argument or a valid ScmpCondition struct.
+func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCondition, error) {
+ var condStruct ScmpCondition
+
+ if err := ensureSupportedVersion(); err != nil {
+ return condStruct, err
+ }
+
+ if comparison == CompareInvalid {
+ return condStruct, fmt.Errorf("invalid comparison operator")
+ } else if arg > 5 {
+ return condStruct, fmt.Errorf("syscalls only have up to 6 arguments")
+ } else if len(values) > 2 {
+ return condStruct, fmt.Errorf("conditions can have at most 2 arguments")
+ } else if len(values) == 0 {
+ return condStruct, fmt.Errorf("must provide at least one value to compare against")
+ }
+
+ condStruct.Argument = arg
+ condStruct.Op = comparison
+ condStruct.Operand1 = values[0]
+ if len(values) == 2 {
+ condStruct.Operand2 = values[1]
+ } else {
+ condStruct.Operand2 = 0 // Unused
+ }
+
+ return condStruct, nil
+}
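+
+// For example (an illustrative sketch), to match only when argument 1 of the
+// syscall equals 0:
+//
+//	cond, err := MakeCondition(1, CompareEqual, 0)
+//
+// or, with a masked comparison, when the low byte of argument 0 equals 0x1:
+//
+//	cond, err := MakeCondition(0, CompareMaskedEqual, 0x1, 0xFF)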
+
+// Utility Functions
+
+// GetNativeArch returns architecture token representing the native kernel
+// architecture
+func GetNativeArch() (ScmpArch, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return ArchInvalid, err
+ }
+
+ arch := C.seccomp_arch_native()
+
+ return archFromNative(arch)
+}
+
+// Public Filter API
+
+// ScmpFilter represents a filter context in libseccomp.
+// A filter context is initially empty. Rules can be added to it, and it can
+// then be loaded into the kernel.
+type ScmpFilter struct {
+ filterCtx C.scmp_filter_ctx
+ valid bool
+ lock sync.Mutex
+}
+
+// NewFilter creates and returns a new filter context.
+// Accepts a default action to be taken for syscalls which match no rules in
+// the filter.
+// Returns a reference to a valid filter context, or nil and an error if the
+// filter context could not be created or an invalid default action was given.
+func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
+ if err := ensureSupportedVersion(); err != nil {
+ return nil, err
+ }
+
+ if err := sanitizeAction(defaultAction); err != nil {
+ return nil, err
+ }
+
+ fPtr := C.seccomp_init(defaultAction.toNative())
+ if fPtr == nil {
+ return nil, fmt.Errorf("could not create filter")
+ }
+
+ filter := new(ScmpFilter)
+ filter.filterCtx = fPtr
+ filter.valid = true
+ runtime.SetFinalizer(filter, filterFinalizer)
+
+ return filter, nil
+}
+
+// IsValid determines whether a filter context is valid to use.
+// Some operations (Release and Merge) render filter contexts invalid and
+// consequently prevent further use.
+func (f *ScmpFilter) IsValid() bool {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ return f.valid
+}
+
+// Reset resets a filter context, removing all its existing state.
+// Accepts a new default action to be taken for syscalls which do not match.
+// Returns an error if the filter or action provided are invalid.
+func (f *ScmpFilter) Reset(defaultAction ScmpAction) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeAction(defaultAction); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ retCode := C.seccomp_reset(f.filterCtx, defaultAction.toNative())
+ if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Release releases a filter context, freeing its memory. Should be called after
+// loading into the kernel, when the filter is no longer needed.
+// After calling this function, the given filter is no longer valid and cannot
+// be used.
+// Release() will be invoked automatically when a filter context is garbage
+// collected, but can also be called manually to free memory.
+func (f *ScmpFilter) Release() {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return
+ }
+
+ f.valid = false
+ C.seccomp_release(f.filterCtx)
+}
+
+// Merge merges two filter contexts.
+// The source filter src will be released as part of the process, and will no
+// longer be usable or valid after this call.
+// To be merged, filters must NOT share any architectures, and all their
+// attributes (Default Action, Bad Arch Action, No New Privs and TSync bools)
+// must match.
+// The filter src will be merged into the filter this is called on.
+// The architectures of the src filter not present in the destination, and all
+// associated rules, will be added to the destination.
+// Returns an error if merging the filters failed.
+func (f *ScmpFilter) Merge(src *ScmpFilter) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ src.lock.Lock()
+ defer src.lock.Unlock()
+
+ if !src.valid || !f.valid {
+ return fmt.Errorf("one or more of the filter contexts is invalid or uninitialized")
+ }
+
+ // Merge the filters
+ retCode := C.seccomp_merge(f.filterCtx, src.filterCtx)
+ if syscall.Errno(-1*retCode) == syscall.EINVAL {
+ return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter")
+ } else if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ src.valid = false
+
+ return nil
+}
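+
+// An illustrative sketch (error handling elided, and assuming the native
+// architecture is not x86) of merging two filters with disjoint architecture
+// sets and identical attributes:
+//
+//	dst, _ := NewFilter(ActAllow)
+//	src, _ := NewFilter(ActAllow)
+//	_ = src.RemoveArch(ArchNative)
+//	_ = src.AddArch(ArchX86)
+//	err := dst.Merge(src) // on success, src is released and no longer valid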
+
+// IsArchPresent checks if an architecture is present in a filter.
+// If a filter contains an architecture, it uses its default action for
+// syscalls which do not match rules in it, and its rules can match syscalls
+// for that ABI.
+// If a filter does not contain an architecture, all syscalls made to that
+// kernel ABI will fail with the filter's default Bad Architecture Action
+// (by default, killing the process).
+// Accepts an architecture constant.
+// Returns true if the architecture is present in the filter, false otherwise,
+// and an error on an invalid filter context, architecture constant, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) IsArchPresent(arch ScmpArch) (bool, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return false, err
+ } else if !f.valid {
+ return false, errBadFilter
+ }
+
+ retCode := C.seccomp_arch_exist(f.filterCtx, arch.toNative())
+ if syscall.Errno(-1*retCode) == syscall.EEXIST {
+ // -EEXIST is "arch not present"
+ return false, nil
+ } else if retCode != 0 {
+ return false, syscall.Errno(-1 * retCode)
+ }
+
+ return true, nil
+}
+
+// AddArch adds an architecture to the filter.
+// Accepts an architecture constant.
+// Returns an error on invalid filter context or architecture token, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) AddArch(arch ScmpArch) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ // Libseccomp returns -EEXIST if the specified architecture is already
+ // present. Succeed silently in this case, as it's not fatal, and the
+ // architecture is present already.
+ retCode := C.seccomp_arch_add(f.filterCtx, arch.toNative())
+ if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// RemoveArch removes an architecture from the filter.
+// Accepts an architecture constant.
+// Returns an error on invalid filter context or architecture token, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) RemoveArch(arch ScmpArch) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+// Similar to AddArch, -EEXIST is returned if the arch is not present.
+// Succeed silently in that case; this is not fatal, and the architecture
+// is not present in the filter after RemoveArch.
+ retCode := C.seccomp_arch_remove(f.filterCtx, arch.toNative())
+ if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Load loads a filter context into the kernel.
+// Returns an error if the filter context is invalid or the syscall failed.
+func (f *ScmpFilter) Load() error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_load(f.filterCtx); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// GetDefaultAction returns the default action taken on a syscall which does not
+// match a rule in the filter, or an error if an issue was encountered
+// retrieving the value.
+func (f *ScmpFilter) GetDefaultAction() (ScmpAction, error) {
+ action, err := f.getFilterAttr(filterAttrActDefault)
+ if err != nil {
+ return 0x0, err
+ }
+
+ return actionFromNative(action)
+}
+
+// GetBadArchAction returns the default action taken on a syscall for an
+// architecture not in the filter, or an error if an issue was encountered
+// retrieving the value.
+func (f *ScmpFilter) GetBadArchAction() (ScmpAction, error) {
+ action, err := f.getFilterAttr(filterAttrActBadArch)
+ if err != nil {
+ return 0x0, err
+ }
+
+ return actionFromNative(action)
+}
+
+// GetNoNewPrivsBit returns the state the No New Privileges bit will be set
+// to when the filter is loaded, or an error if an issue was encountered
+// retrieving the value.
+// The No New Privileges bit tells the kernel that new processes run with exec()
+// cannot gain more privileges than the process that ran exec().
+// For example, a process with No New Privileges set would be unable to exec
+// setuid/setgid executables.
+func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
+ noNewPrivs, err := f.getFilterAttr(filterAttrNNP)
+ if err != nil {
+ return false, err
+ }
+
+ if noNewPrivs == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// GetTsyncBit returns whether Thread Synchronization will be enabled on the
+// filter being loaded, or an error if an issue was encountered retrieving the
+// value.
+// Thread Sync ensures that all members of the thread group of the calling
+// process will share the same Seccomp filter set.
+// Tsync is a fairly recent addition to the Linux kernel and older kernels
+// lack support. If the running kernel does not support Tsync and it is
+// requested in a filter, Libseccomp will not enable TSync support and will
+// proceed as normal.
+// This function is unavailable before v2.2 of libseccomp and will return an
+// error.
+func (f *ScmpFilter) GetTsyncBit() (bool, error) {
+ tSync, err := f.getFilterAttr(filterAttrTsync)
+ if err != nil {
+ return false, err
+ }
+
+ if tSync == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// SetBadArchAction sets the default action taken on a syscall for an
+// architecture not in the filter. Returns an error if an issue was
+// encountered setting the value.
+func (f *ScmpFilter) SetBadArchAction(action ScmpAction) error {
+ if err := sanitizeAction(action); err != nil {
+ return err
+ }
+
+ return f.setFilterAttr(filterAttrActBadArch, action.toNative())
+}
+
+// SetNoNewPrivsBit sets the state of the No New Privileges bit, which will
+// be applied on filter load. Returns an error if an issue was encountered
+// setting the value.
+// Filters with No New Privileges set to 0 can only be loaded if the process
+// has the CAP_SYS_ADMIN capability.
+func (f *ScmpFilter) SetNoNewPrivsBit(state bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if state {
+ toSet = 0x1
+ }
+
+ return f.setFilterAttr(filterAttrNNP, toSet)
+}
+
+// SetTsync sets whether Thread Synchronization will be enabled on the filter
+// being loaded. Returns an error if setting Tsync failed, or the filter is
+// invalid.
+// Thread Sync ensures that all members of the thread group of the calling
+// process will share the same Seccomp filter set.
+// Tsync is a fairly recent addition to the Linux kernel and older kernels
+// lack support. If the running kernel does not support Tsync and it is
+// requested in a filter, Libseccomp will not enable TSync support and will
+// proceed as normal.
+// This function is unavailable before v2.2 of libseccomp and will return an
+// error.
+func (f *ScmpFilter) SetTsync(enable bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if enable {
+ toSet = 0x1
+ }
+
+ return f.setFilterAttr(filterAttrTsync, toSet)
+}
+
+// SetSyscallPriority sets a syscall's priority.
+// This provides a hint to the filter generator in libseccomp about the
+// importance of this syscall. High-priority syscalls are placed
+// first in the filter code, and incur less overhead (at the expense of
+// lower-priority syscalls).
+func (f *ScmpFilter) SetSyscallPriority(call ScmpSyscall, priority uint8) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_syscall_priority(f.filterCtx, C.int(call),
+ C.uint8_t(priority)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// AddRule adds a single rule for an unconditional action on a syscall.
+// Accepts the number of the syscall and the action to be taken on the call
+// being made.
+// Returns an error if an issue was encountered adding the rule.
+func (f *ScmpFilter) AddRule(call ScmpSyscall, action ScmpAction) error {
+ return f.addRuleGeneric(call, action, false, nil)
+}
+
+// AddRuleExact adds a single rule for an unconditional action on a syscall.
+// Accepts the number of the syscall and the action to be taken on the call
+// being made.
+// No modifications will be made to the rule, and it will fail to add if it
+// cannot be applied to the current architecture without modification.
+// The rule will function exactly as described, but it may not function
+// identically on (or be applicable to) all architectures.
+// Returns an error if an issue was encountered adding the rule.
+func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error {
+ return f.addRuleGeneric(call, action, true, nil)
+}
+
+// AddRuleConditional adds a single rule for a conditional action on a syscall.
+// Returns an error if an issue was encountered adding the rule.
+// All conditions must match for the rule to match.
+// There is a bug in library versions below v2.2.1 which can, in some cases,
+// cause conditions to be lost when more than one is used. Consequently,
+// AddRuleConditional is disabled on library versions lower than v2.2.1.
+func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
+ return f.addRuleGeneric(call, action, false, conds)
+}
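+
+// For example (an illustrative sketch, given an existing filter f), denying
+// write() to file descriptor 2:
+//
+//	call, _ := GetSyscallFromName("write")
+//	cond, _ := MakeCondition(0, CompareEqual, 2)
+//	_ = f.AddRuleConditional(call, ActErrno, []ScmpCondition{cond})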
+
+// AddRuleConditionalExact adds a single rule for a conditional action on a
+// syscall.
+// No modifications will be made to the rule, and it will fail to add if it
+// cannot be applied to the current architecture without modification.
+// The rule will function exactly as described, but it may not function
+// identically on (or be applicable to) all architectures.
+// Returns an error if an issue was encountered adding the rule.
+// There is a bug in library versions below v2.2.1 which can, in some cases,
+// cause conditions to be lost when more than one is used. Consequently,
+// AddRuleConditionalExact is disabled on library versions lower than v2.2.1.
+func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
+ return f.addRuleGeneric(call, action, true, conds)
+}
+
+// ExportPFC outputs a PFC-formatted, human-readable dump of a filter
+// context's rules to a file.
+// Accepts file to write to (must be open for writing).
+// Returns an error if writing to the file fails.
+func (f *ScmpFilter) ExportPFC(file *os.File) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ fd := file.Fd()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_export_pfc(f.filterCtx, C.int(fd)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// ExportBPF outputs a Berkeley Packet Filter-formatted, kernel-readable
+// dump of a filter context's rules to a file.
+// Accepts file to write to (must be open for writing).
+// Returns an error if writing to the file fails.
+func (f *ScmpFilter) ExportBPF(file *os.File) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ fd := file.Fd()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_export_bpf(f.filterCtx, C.int(fd)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
new file mode 100644
index 000000000..b0caac91b
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -0,0 +1,514 @@
+// +build linux
+
+// Internal functions for libseccomp Go bindings
+// No exported functions
+
+package seccomp
+
+import (
+ "fmt"
+ "syscall"
+)
+
+// Unexported C wrapping code - provides the C-Golang interface
+// Get the seccomp header in scope
+// Need stdlib.h for free() on cstrings
+
+// #cgo pkg-config: libseccomp
+/*
+#include <stdlib.h>
+#include <seccomp.h>
+
+#if SCMP_VER_MAJOR < 2
+#error Minimum supported version of Libseccomp is v2.1.0
+#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 1
+#error Minimum supported version of Libseccomp is v2.1.0
+#endif
+
+#define ARCH_BAD ~0
+
+const uint32_t C_ARCH_BAD = ARCH_BAD;
+
+#ifndef SCMP_ARCH_AARCH64
+#define SCMP_ARCH_AARCH64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS
+#define SCMP_ARCH_MIPS ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS64
+#define SCMP_ARCH_MIPS64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS64N32
+#define SCMP_ARCH_MIPS64N32 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL
+#define SCMP_ARCH_MIPSEL ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL64
+#define SCMP_ARCH_MIPSEL64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL64N32
+#define SCMP_ARCH_MIPSEL64N32 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_PPC
+#define SCMP_ARCH_PPC ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_PPC64
+#define SCMP_ARCH_PPC64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_PPC64LE
+#define SCMP_ARCH_PPC64LE ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_S390
+#define SCMP_ARCH_S390 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_S390X
+#define SCMP_ARCH_S390X ARCH_BAD
+#endif
+
+const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE;
+const uint32_t C_ARCH_X86 = SCMP_ARCH_X86;
+const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64;
+const uint32_t C_ARCH_X32 = SCMP_ARCH_X32;
+const uint32_t C_ARCH_ARM = SCMP_ARCH_ARM;
+const uint32_t C_ARCH_AARCH64 = SCMP_ARCH_AARCH64;
+const uint32_t C_ARCH_MIPS = SCMP_ARCH_MIPS;
+const uint32_t C_ARCH_MIPS64 = SCMP_ARCH_MIPS64;
+const uint32_t C_ARCH_MIPS64N32 = SCMP_ARCH_MIPS64N32;
+const uint32_t C_ARCH_MIPSEL = SCMP_ARCH_MIPSEL;
+const uint32_t C_ARCH_MIPSEL64 = SCMP_ARCH_MIPSEL64;
+const uint32_t C_ARCH_MIPSEL64N32 = SCMP_ARCH_MIPSEL64N32;
+const uint32_t C_ARCH_PPC = SCMP_ARCH_PPC;
+const uint32_t C_ARCH_PPC64 = SCMP_ARCH_PPC64;
+const uint32_t C_ARCH_PPC64LE = SCMP_ARCH_PPC64LE;
+const uint32_t C_ARCH_S390 = SCMP_ARCH_S390;
+const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
+
+const uint32_t C_ACT_KILL = SCMP_ACT_KILL;
+const uint32_t C_ACT_TRAP = SCMP_ACT_TRAP;
+const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
+const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
+const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW;
+
+// If TSync is not supported, make sure it doesn't map to a supported filter attribute
+// Don't worry about major version < 2, the minimum version checks should catch that case
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
+#define SCMP_FLTATR_CTL_TSYNC _SCMP_CMP_MIN
+#endif
+
+const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
+const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
+const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
+const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
+
+const int C_CMP_NE = (int)SCMP_CMP_NE;
+const int C_CMP_LT = (int)SCMP_CMP_LT;
+const int C_CMP_LE = (int)SCMP_CMP_LE;
+const int C_CMP_EQ = (int)SCMP_CMP_EQ;
+const int C_CMP_GE = (int)SCMP_CMP_GE;
+const int C_CMP_GT = (int)SCMP_CMP_GT;
+const int C_CMP_MASKED_EQ = (int)SCMP_CMP_MASKED_EQ;
+
+const int C_VERSION_MAJOR = SCMP_VER_MAJOR;
+const int C_VERSION_MINOR = SCMP_VER_MINOR;
+const int C_VERSION_MICRO = SCMP_VER_MICRO;
+
+typedef struct scmp_arg_cmp* scmp_cast_t;
+
+// Wrapper to create an scmp_arg_cmp struct
+void*
+make_struct_arg_cmp(
+ unsigned int arg,
+ int compare,
+ uint64_t a,
+ uint64_t b
+ )
+{
+ struct scmp_arg_cmp *s = malloc(sizeof(struct scmp_arg_cmp));
+
+ s->arg = arg;
+ s->op = compare;
+ s->datum_a = a;
+ s->datum_b = b;
+
+ return s;
+}
+*/
+import "C"
+
+// Nonexported types
+type scmpFilterAttr uint32
+
+// Nonexported constants
+
+const (
+ filterAttrActDefault scmpFilterAttr = iota
+ filterAttrActBadArch scmpFilterAttr = iota
+ filterAttrNNP scmpFilterAttr = iota
+ filterAttrTsync scmpFilterAttr = iota
+)
+
+const (
+ // An error return from certain libseccomp functions
+ scmpError C.int = -1
+ // Comparison boundaries to check for architecture validity
+ archStart ScmpArch = ArchNative
+ archEnd ScmpArch = ArchS390X
+ // Comparison boundaries to check for action validity
+ actionStart ScmpAction = ActKill
+ actionEnd ScmpAction = ActAllow
+ // Comparison boundaries to check for comparison operator validity
+ compareOpStart ScmpCompareOp = CompareNotEqual
+ compareOpEnd ScmpCompareOp = CompareMaskedEqual
+)
+
+var (
+ // Error thrown on bad filter context
+ errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
+ // Constants representing library major, minor, and micro versions
+ verMajor = int(C.C_VERSION_MAJOR)
+ verMinor = int(C.C_VERSION_MINOR)
+ verMicro = int(C.C_VERSION_MICRO)
+)
+
+// Nonexported functions
+
+// Check if library version is greater than or equal to the given one
+func checkVersionAbove(major, minor, micro int) bool {
+ return (verMajor > major) ||
+ (verMajor == major && verMinor > minor) ||
+ (verMajor == major && verMinor == minor && verMicro >= micro)
+}
+
+// Ensure that the library is supported, i.e. >= 2.1.0.
+func ensureSupportedVersion() error {
+ if !checkVersionAbove(2, 1, 0) {
+ return VersionError{}
+ }
+ return nil
+}
+
+// Filter helpers
+
+// Filter finalizer - ensure that kernel context for filters is freed
+func filterFinalizer(f *ScmpFilter) {
+ f.Release()
+}
+
+// Get a raw filter attribute
+func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return 0x0, errBadFilter
+ }
+
+ if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
+ return 0x0, VersionError{
+ message: "thread synchronization attribute is not supported",
+ minimum: "2.2.0",
+ }
+ }
+
+ var attribute C.uint32_t
+
+ retCode := C.seccomp_attr_get(f.filterCtx, attr.toNative(), &attribute)
+ if retCode != 0 {
+ return 0x0, syscall.Errno(-1 * retCode)
+ }
+
+ return attribute, nil
+}
+
+// Set a raw filter attribute
+func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
+ return VersionError{
+ message: "thread synchronization attribute is not supported",
+ minimum: "2.2.0",
+ }
+ }
+
+ retCode := C.seccomp_attr_set(f.filterCtx, attr.toNative(), value)
+ if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// DOES NOT LOCK OR CHECK VALIDITY
+// Assumes caller has already done this
+// Wrapper for seccomp_rule_add_... functions
+func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, cond C.scmp_cast_t) error {
+ var length C.uint
+ if cond != nil {
+ length = 1
+ } else {
+ length = 0
+ }
+
+ var retCode C.int
+ if exact {
+ retCode = C.seccomp_rule_add_exact_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
+ } else {
+ retCode = C.seccomp_rule_add_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
+ }
+
+ if syscall.Errno(-1*retCode) == syscall.EFAULT {
+ return fmt.Errorf("unrecognized syscall")
+ } else if syscall.Errno(-1*retCode) == syscall.EPERM {
+ return fmt.Errorf("requested action matches default action of filter")
+ } else if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Generic add function for filter rules
+func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact bool, conds []ScmpCondition) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if len(conds) == 0 {
+ if err := f.addRuleWrapper(call, action, exact, nil); err != nil {
+ return err
+ }
+ } else {
+ // We don't support conditional filtering in library version v2.1
+ if !checkVersionAbove(2, 2, 1) {
+ return VersionError{
+ message: "conditional filtering is not supported",
+ minimum: "2.2.1",
+ }
+ }
+
+ for _, cond := range conds {
+ cmpStruct := C.make_struct_arg_cmp(C.uint(cond.Argument), cond.Op.toNative(), C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
+ defer C.free(cmpStruct)
+
+ if err := f.addRuleWrapper(call, action, exact, C.scmp_cast_t(cmpStruct)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// Generic Helpers
+
+// Helper - Sanitize Arch token input
+func sanitizeArch(in ScmpArch) error {
+ if in < archStart || in > archEnd {
+ return fmt.Errorf("unrecognized architecture")
+ }
+
+ if in.toNative() == C.C_ARCH_BAD {
+ return fmt.Errorf("architecture is not supported on this version of the library")
+ }
+
+ return nil
+}
+
+func sanitizeAction(in ScmpAction) error {
+ inTmp := in & 0x0000FFFF
+ if inTmp < actionStart || inTmp > actionEnd {
+ return fmt.Errorf("unrecognized action")
+ }
+
+ if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 {
+ return fmt.Errorf("highest 16 bits must be zeroed except for Trace and Errno")
+ }
+
+ return nil
+}
+
+func sanitizeCompareOp(in ScmpCompareOp) error {
+ if in < compareOpStart || in > compareOpEnd {
+ return fmt.Errorf("unrecognized comparison operator")
+ }
+
+ return nil
+}
+
+func archFromNative(a C.uint32_t) (ScmpArch, error) {
+ switch a {
+ case C.C_ARCH_X86:
+ return ArchX86, nil
+ case C.C_ARCH_X86_64:
+ return ArchAMD64, nil
+ case C.C_ARCH_X32:
+ return ArchX32, nil
+ case C.C_ARCH_ARM:
+ return ArchARM, nil
+ case C.C_ARCH_NATIVE:
+ return ArchNative, nil
+ case C.C_ARCH_AARCH64:
+ return ArchARM64, nil
+ case C.C_ARCH_MIPS:
+ return ArchMIPS, nil
+ case C.C_ARCH_MIPS64:
+ return ArchMIPS64, nil
+ case C.C_ARCH_MIPS64N32:
+ return ArchMIPS64N32, nil
+ case C.C_ARCH_MIPSEL:
+ return ArchMIPSEL, nil
+ case C.C_ARCH_MIPSEL64:
+ return ArchMIPSEL64, nil
+ case C.C_ARCH_MIPSEL64N32:
+ return ArchMIPSEL64N32, nil
+ case C.C_ARCH_PPC:
+ return ArchPPC, nil
+ case C.C_ARCH_PPC64:
+ return ArchPPC64, nil
+ case C.C_ARCH_PPC64LE:
+ return ArchPPC64LE, nil
+ case C.C_ARCH_S390:
+ return ArchS390, nil
+ case C.C_ARCH_S390X:
+ return ArchS390X, nil
+ default:
+ return 0x0, fmt.Errorf("unrecognized architecture")
+ }
+}
+
+// Only use with sanitized arches, no error handling
+func (a ScmpArch) toNative() C.uint32_t {
+ switch a {
+ case ArchX86:
+ return C.C_ARCH_X86
+ case ArchAMD64:
+ return C.C_ARCH_X86_64
+ case ArchX32:
+ return C.C_ARCH_X32
+ case ArchARM:
+ return C.C_ARCH_ARM
+ case ArchARM64:
+ return C.C_ARCH_AARCH64
+ case ArchMIPS:
+ return C.C_ARCH_MIPS
+ case ArchMIPS64:
+ return C.C_ARCH_MIPS64
+ case ArchMIPS64N32:
+ return C.C_ARCH_MIPS64N32
+ case ArchMIPSEL:
+ return C.C_ARCH_MIPSEL
+ case ArchMIPSEL64:
+ return C.C_ARCH_MIPSEL64
+ case ArchMIPSEL64N32:
+ return C.C_ARCH_MIPSEL64N32
+ case ArchPPC:
+ return C.C_ARCH_PPC
+ case ArchPPC64:
+ return C.C_ARCH_PPC64
+ case ArchPPC64LE:
+ return C.C_ARCH_PPC64LE
+ case ArchS390:
+ return C.C_ARCH_S390
+ case ArchS390X:
+ return C.C_ARCH_S390X
+ case ArchNative:
+ return C.C_ARCH_NATIVE
+ default:
+ return 0x0
+ }
+}
+
+// Only use with sanitized ops, no error handling
+func (a ScmpCompareOp) toNative() C.int {
+ switch a {
+ case CompareNotEqual:
+ return C.C_CMP_NE
+ case CompareLess:
+ return C.C_CMP_LT
+ case CompareLessOrEqual:
+ return C.C_CMP_LE
+ case CompareEqual:
+ return C.C_CMP_EQ
+ case CompareGreaterEqual:
+ return C.C_CMP_GE
+ case CompareGreater:
+ return C.C_CMP_GT
+ case CompareMaskedEqual:
+ return C.C_CMP_MASKED_EQ
+ default:
+ return 0x0
+ }
+}
+
+func actionFromNative(a C.uint32_t) (ScmpAction, error) {
+ aTmp := a & 0xFFFF
+ switch a & 0xFFFF0000 {
+ case C.C_ACT_KILL:
+ return ActKill, nil
+ case C.C_ACT_TRAP:
+ return ActTrap, nil
+ case C.C_ACT_ERRNO:
+ return ActErrno.SetReturnCode(int16(aTmp)), nil
+ case C.C_ACT_TRACE:
+ return ActTrace.SetReturnCode(int16(aTmp)), nil
+ case C.C_ACT_ALLOW:
+ return ActAllow, nil
+ default:
+ return 0x0, fmt.Errorf("unrecognized action")
+ }
+}
+
+// Only use with sanitized actions, no error handling
+func (a ScmpAction) toNative() C.uint32_t {
+ switch a & 0xFFFF {
+ case ActKill:
+ return C.C_ACT_KILL
+ case ActTrap:
+ return C.C_ACT_TRAP
+ case ActErrno:
+ return C.C_ACT_ERRNO | (C.uint32_t(a) >> 16)
+ case ActTrace:
+ return C.C_ACT_TRACE | (C.uint32_t(a) >> 16)
+ case ActAllow:
+ return C.C_ACT_ALLOW
+ default:
+ return 0x0
+ }
+}
+
+// Internal only, assumes safe attribute
+func (a scmpFilterAttr) toNative() uint32 {
+ switch a {
+ case filterAttrActDefault:
+ return uint32(C.C_ATTRIBUTE_DEFAULT)
+ case filterAttrActBadArch:
+ return uint32(C.C_ATTRIBUTE_BADARCH)
+ case filterAttrNNP:
+ return uint32(C.C_ATTRIBUTE_NNP)
+ case filterAttrTsync:
+ return uint32(C.C_ATTRIBUTE_TSYNC)
+ default:
+ return 0x0
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 000000000..f090cb42f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 000000000..cbe8b6962
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,501 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
+realize the consequences of renaming to lower-case. Due to the Go package
+environment, this caused issues. Regretfully, there's no turning back now.
+Everything using `logrus` will need to use the lower-case:
+`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
+
+I am terribly sorry for this inconvenience. Logrus strives hard for backwards
+compatibility, and the author failed to realize the cascading consequences of
+such a name-change. To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case--and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is with the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+ "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different from the package-level
+ // exported logger. See Godoc.
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of:
+`log.Fatalf("Failed to send event %s to topic %s with key %d", event, topic, key)`,
+you should log the much more discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where
+adding a single field to an existing log statement would have saved us hours.
+The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
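+
+For example, a minimal sketch reusing the `event` and `topic` variables from
+the example above:
+
+```go
+log.Infof("Sent event %s to topic %s", event, topic)
+```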
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, to send info to
+StatsD, or to log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the Syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
+| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
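+
+For instance, a minimal sketch of that pattern (the `DEBUG` environment
+variable is a hypothetical convention, not part of Logrus):
+
+```go
+if os.Getenv("DEBUG") != "" {
+  log.SetLevel(log.DebugLevel)
+}
+```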
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
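+
+For example, with the default text formatter a single call produces all three
+default fields alongside your own (timestamp elided):
+
+```go
+log.WithField("animal", "walrus").Info("A walrus appears")
+// time="..." level=info msg="A walrus appears" animal=walrus
+```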
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is generally only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
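+
+The `JSONFormatter` in this vendored copy also lets you remap the default key
+names via its `FieldMap` option (mirroring the example in `json_formatter.go`):
+
+```go
+log.SetFormatter(&log.JSONFormatter{
+  FieldMap: log.FieldMap{
+    log.FieldKeyTime:  "@timestamp",
+    log.FieldKeyLevel: "@level",
+    log.FieldKeyMsg:   "@message",
+  },
+})
+```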
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your own formatter by implementing the `Formatter` interface,
+which requires a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see the Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func init() {
+	// Register the formatter at start-up; any point before logging works.
+	log.SetFormatter(new(MyJSONFormatter))
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+	// Note this doesn't include Time, Level and Message which are available on
+	// the Entry. Consult `godoc` for information about those fields or read the
+	// source of the official loggers.
+	serialized, err := json.Marshal(entry.Data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
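+
+If you need those entries at a different level, the logger also exposes a
+`WriterLevel` method (see `writer.go` in this package):
+
+```go
+w := logger.WriterLevel(logrus.ErrorLevel)
+defer w.Close()
+```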
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+files. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can set each logger's level, hooks and formatter from a config file, so loggers are generated with different configurations in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus to wrap with spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration by reusing some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/null"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSomething(t *testing.T) {
+ logger, hook := null.NewNullLogger()
+ logger.Error("Helloerror")
+
+ assert.Equal(t, 1, len(hook.Entries))
+ assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+ hook.Reset()
+ assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```go
+handler := func() {
+  // gracefully shut down something...
+}
+logrus.RegisterExitHandler(handler)
+```
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes; the mutex is held while calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
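+
+A minimal sketch of that last case, assuming the conditions above hold (the
+`app.log` path is hypothetical):
+
+```go
+logger := logrus.New()
+// Opened in append mode, so each write under 4k is atomic on Linux.
+file, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+if err == nil {
+  logger.Out = file
+  logger.SetNoLock()
+}
+```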
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 000000000..8af90637a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 000000000..da67aba06
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 000000000..320e5d5b8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+}
+
+// ErrorKey defines the key used when adding errors with WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+	// When the formatter is called in entry.log(), a Buffer may be set on the entry
+ Buffer *bytes.Buffer
+}
+
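+// NewEntry returns a new Entry attached to the given Logger.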
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// String returns the entry's string representation, as produced by the
+// logger's Formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+ serialized, err := entry.Logger.Formatter.Format(&entry)
+ entry.Buffer = nil
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ } else {
+ entry.Logger.mu.Lock()
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ entry.Logger.mu.Unlock()
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.level() >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.level() >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.level() >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.level() >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.level() >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.level() >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.level() >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.level() >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.level() >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.level() >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.level() >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.level() >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.level() >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.level() >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.level() >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.level() >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.level() >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.level() >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// sprintlnn => Sprint, no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 000000000..1aeaa90ba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
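+// StandardLogger returns the standard logger used by the package-level
+// exported functions.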
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.setLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.level()
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 000000000..b5fbe934d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc.
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to avoid silently overwriting the `time`, `msg` and `level` fields
+// when dumping them. Without this code, a call like:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
+ }
+
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
+ }
+
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 000000000..3f151cdc3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that hooks are not
+// fired in a goroutine or a channel with workers; if your hook may block and
+// you don't wish logging calls at those levels to block, you should handle
+// that concurrency yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 000000000..e787ea175
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // FieldMap allows users to customize the names of keys for various fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // },
+ // }
+ FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 000000000..370fff5d1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,317 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it as the default, which is `os.Stderr`. You can also set
+	// this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in debug or verbose environments.
+ Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+}
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infof(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalf(format, args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debug(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Error(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatal(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panic(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infoln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalln(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+// SetNoLock disables the logger's internal lock. When the output file is
+// opened in append mode, it's safe to write to it concurrently (for messages
+// under 4k on Linux). In such cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+ return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+func (logger *Logger) setLevel(level Level) {
+ atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 000000000..dd3899974
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 000000000..e011a8694
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,10 @@
+// +build appengine
+
+package logrus
+
+import "io"
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 000000000..5f6be4d3c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go
new file mode 100644
index 000000000..308160ca8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 000000000..190297abf
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,28 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ var termios Termios
+ switch v := f.(type) {
+ case *os.File:
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 000000000..3c86b1abe
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,21 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+ "io"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ switch v := f.(type) {
+ case *os.File:
+ _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
+ return err == nil
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go
new file mode 100644
index 000000000..7a336307e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,82 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
+)
+
+const (
+ enableProcessedOutput = 0x0001
+ enableWrapAtEolOutput = 0x0002
+ enableVirtualTerminalProcessing = 0x0004
+)
+
+func getVersion() (float64, error) {
+ stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
+ cmd := exec.Command("cmd", "ver")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err := cmd.Run()
+ if err != nil {
+ return -1, err
+ }
+
+ // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
+	version := strings.Replace(stdout.String(), "\r\n", "", -1)
+	version = strings.Replace(version, "\n", "", -1)
+
+ x1 := strings.Index(version, "[Version")
+
+ if x1 == -1 || strings.Index(version, "]") == -1 {
+ return -1, errors.New("Can't determine Windows version")
+ }
+
+ return strconv.ParseFloat(version[x1+9:x1+13], 64)
+}
+
+func init() {
+ ver, err := getVersion()
+ if err != nil {
+ return
+ }
+
+ // Activate Virtual Processing for Windows CMD
+ // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+ if ver >= 10 {
+ handle := syscall.Handle(os.Stderr.Fd())
+ procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
+ }
+}
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ switch v := f.(type) {
+ case *os.File:
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 000000000..ba8885406
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,189 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+)
+
+func init() {
+ baseTimestamp = time.Now()
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+	// QuoteCharacter can be set to override the default quoting character "
+ // with something else. For example: ', or `.
+ QuoteCharacter string
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+ if len(f.QuoteCharacter) == 0 {
+ f.QuoteCharacter = "\""
+ }
+ if entry.Logger != nil {
+ f.isTerminal = IsTerminal(entry.Logger.Out)
+ }
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ var b *bytes.Buffer
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ prefixFieldClashes(entry.Data)
+
+ f.Do(func() { f.init(entry) })
+
+ isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
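+// needsQuoting reports whether text must be quoted: either it is empty while
+// QuoteEmptyFields is set, or it contains a character outside [a-zA-Z0-9.-].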
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+ b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ switch value := value.(type) {
+ case string:
+ if !f.needsQuoting(value) {
+ b.WriteString(value)
+ } else {
+ fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
+ }
+ case error:
+ errmsg := value.Error()
+ if !f.needsQuoting(errmsg) {
+ b.WriteString(errmsg)
+ } else {
+ fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 000000000..7bdebedc6
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,62 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
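+// Writer returns an *io.PipeWriter; each line written to it is logged at
+// InfoLevel. One possible use (hypothetical, not part of this package) is
+// redirecting the standard library's logger:
+//
+//	log.SetOutput(logger.Writer())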
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+
+ switch level {
+ case DebugLevel:
+ printFunc = entry.Debug
+ case InfoLevel:
+ printFunc = entry.Info
+ case WarnLevel:
+ printFunc = entry.Warn
+ case ErrorLevel:
+ printFunc = entry.Error
+ case FatalLevel:
+ printFunc = entry.Fatal
+ case PanicLevel:
+ printFunc = entry.Panic
+ default:
+ printFunc = entry.Print
+ }
+
+ go entry.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ entry.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
diff --git a/vendor/github.com/soheilhy/cmux/LICENSE b/vendor/github.com/soheilhy/cmux/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/soheilhy/cmux/README.md b/vendor/github.com/soheilhy/cmux/README.md
new file mode 100644
index 000000000..70306e6ab
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/README.md
@@ -0,0 +1,83 @@
+# cmux: Connection Mux ![Travis Build Status](https://api.travis-ci.org/soheilhy/cmux.svg?branch=master "Travis Build Status") [![GoDoc](https://godoc.org/github.com/soheilhy/cmux?status.svg)](http://godoc.org/github.com/soheilhy/cmux)
+
+cmux is a generic Go library to multiplex connections based on
+their payload. Using cmux, you can serve gRPC, SSH, HTTPS, HTTP,
+Go RPC, and pretty much any other protocol on the same TCP listener.
+
+## How-To
+Simply create your main listener, create a cmux for that listener,
+and then match connections:
+```go
+// Create the main listener.
+l, err := net.Listen("tcp", ":23456")
+if err != nil {
+ log.Fatal(err)
+}
+
+// Create a cmux.
+m := cmux.New(l)
+
+// Match connections in order:
+// First grpc, then HTTP, and otherwise Go RPC/TCP.
+grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
+httpL := m.Match(cmux.HTTP1Fast())
+trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.
+
+// Create your protocol servers.
+grpcS := grpc.NewServer()
+grpchello.RegisterGreeterServer(grpcS, &server{})
+
+httpS := &http.Server{
+ Handler: &helloHTTP1Handler{},
+}
+
+trpcS := rpc.NewServer()
+trpcS.Register(&ExampleRPCRcvr{})
+
+// Use the muxed listeners for your servers.
+go grpcS.Serve(grpcL)
+go httpS.Serve(httpL)
+go trpcS.Accept(trpcL)
+
+// Start serving!
+m.Serve()
+```
+
+Take a look at [other examples in the GoDoc](http://godoc.org/github.com/soheilhy/cmux/#pkg-examples).
+
+## Docs
+* [GoDocs](https://godoc.org/github.com/soheilhy/cmux)
+
+## Performance
+There is room for improvement, but since we only match
+the very first bytes of a connection, the performance overhead on
+long-lived connections (i.e., RPCs and pipelined HTTP streams)
+is negligible.
+
+*TODO(soheil)*: Add benchmarks.
+
+## Limitations
+* *TLS*: `net/http` uses a type assertion to identify TLS connections; since
+cmux's lookahead-implementing connection wraps the underlying TLS connection,
+this type assertion fails.
+Because of that, you can serve HTTPS using cmux but `http.Request.TLS`
+would not be set in your handlers.
+
+* *Different Protocols on The Same Connection*: `cmux` matches the connection
+when it's accepted. For example, one connection can be either gRPC or REST, but
+not both. That is, we assume that a client connection is either used for gRPC
+or REST.
+
+* *Java gRPC Clients*: Java gRPC client blocks until it receives a SETTINGS
+frame from the server. If you are using the Java client to connect to a cmux'ed
+gRPC server please match with writers:
+```go
+grpcl := m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
+```
+
+# Copyright and License
+Copyright 2016 The CMux Authors. All rights reserved.
+
+See [CONTRIBUTORS](https://github.com/soheilhy/cmux/blob/master/CONTRIBUTORS)
+for the CMux Authors. Code is released under
+[the Apache 2 license](https://github.com/soheilhy/cmux/blob/master/LICENSE).
diff --git a/vendor/github.com/soheilhy/cmux/buffer.go b/vendor/github.com/soheilhy/cmux/buffer.go
new file mode 100644
index 000000000..f8cf30a1e
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/buffer.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "bytes"
+ "io"
+)
+
+// bufferedReader is an optimized implementation of io.Reader that behaves like
+// ```
+// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer))
+// ```
+// without allocating.
+type bufferedReader struct {
+ source io.Reader
+ buffer bytes.Buffer
+ bufferRead int
+ bufferSize int
+ sniffing bool
+ lastErr error
+}
+
+func (s *bufferedReader) Read(p []byte) (int, error) {
+ if s.bufferSize > s.bufferRead {
+ // If we have already read something from the buffer before, we return the
+ // same data and the last error if any. We need to immediately return,
+ // otherwise we may block forever if we try to be smart and call
+ // source.Read() seeking a little more data.
+ bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize])
+ s.bufferRead += bn
+ return bn, s.lastErr
+ } else if !s.sniffing && s.buffer.Cap() != 0 {
+ // We don't need the buffer anymore.
+ // Reset it to release the internal slice.
+ s.buffer = bytes.Buffer{}
+ }
+
+ // If there is nothing more to return in the sniffed buffer, read from the
+ // source.
+ sn, sErr := s.source.Read(p)
+ if sn > 0 && s.sniffing {
+ s.lastErr = sErr
+ if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil {
+ return wn, wErr
+ }
+ }
+ return sn, sErr
+}
+
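+// reset re-arms the reader: with snif == true, reads are replayed from the
+// start of the buffered bytes while new data from the source keeps being
+// recorded; with snif == false, the buffered bytes are drained once and the
+// buffer is then released.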
+func (s *bufferedReader) reset(snif bool) {
+ s.sniffing = snif
+ s.bufferRead = 0
+ s.bufferSize = s.buffer.Len()
+}
diff --git a/vendor/github.com/soheilhy/cmux/cmux.go b/vendor/github.com/soheilhy/cmux/cmux.go
new file mode 100644
index 000000000..9de6b0a3c
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/cmux.go
@@ -0,0 +1,269 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+)
+
+// Matcher matches a connection based on its content.
+type Matcher func(io.Reader) bool
+
+// MatchWriter is a match that can also write response (say to do handshake).
+type MatchWriter func(io.Writer, io.Reader) bool
+
+// ErrorHandler handles an error and returns whether
+// the mux should continue serving the listener.
+type ErrorHandler func(error) bool
+
+var _ net.Error = ErrNotMatched{}
+
+// ErrNotMatched is returned whenever a connection is not matched by any of
+// the matchers registered in the multiplexer.
+type ErrNotMatched struct {
+ c net.Conn
+}
+
+func (e ErrNotMatched) Error() string {
+ return fmt.Sprintf("mux: connection %v not matched by any matcher",
+ e.c.RemoteAddr())
+}
+
+// Temporary implements the net.Error interface.
+func (e ErrNotMatched) Temporary() bool { return true }
+
+// Timeout implements the net.Error interface.
+func (e ErrNotMatched) Timeout() bool { return false }
+
+type errListenerClosed string
+
+func (e errListenerClosed) Error() string { return string(e) }
+func (e errListenerClosed) Temporary() bool { return false }
+func (e errListenerClosed) Timeout() bool { return false }
+
+// ErrListenerClosed is returned from muxListener.Accept when the underlying
+// listener is closed.
+var ErrListenerClosed = errListenerClosed("mux: listener closed")
+
+// noTimeout is the zero Duration; it exists for readability of readTimeout checks.
+var noTimeout time.Duration
+
+// New instantiates a new connection multiplexer.
+func New(l net.Listener) CMux {
+ return &cMux{
+ root: l,
+ bufLen: 1024,
+ errh: func(_ error) bool { return true },
+ donec: make(chan struct{}),
+ readTimeout: noTimeout,
+ }
+}
+
+// CMux is a multiplexer for network connections.
+type CMux interface {
+ // Match returns a net.Listener that sees (i.e., accepts) only
+ // the connections matched by at least one of the matchers.
+ //
+ // The order used to call Match determines the priority of matchers.
+ Match(...Matcher) net.Listener
+ // MatchWithWriters returns a net.Listener that accepts only the
+ // connections matched by at least one of the matcher writers.
+ //
+ // Prefer Matchers over MatchWriters, since the latter can write on the
+ // connection before the actual handler.
+ //
+ // The order used to call Match determines the priority of matchers.
+ MatchWithWriters(...MatchWriter) net.Listener
+ // Serve starts multiplexing the listener. Serve blocks, so it should
+ // typically be invoked concurrently within a goroutine.
+ Serve() error
+ // HandleError registers an error handler that handles listener errors.
+ HandleError(ErrorHandler)
+ // SetReadTimeout sets a timeout for the reads performed by the matchers.
+ SetReadTimeout(time.Duration)
+}
+
+type matchersListener struct {
+ ss []MatchWriter
+ l muxListener
+}
+
+type cMux struct {
+ root net.Listener
+ bufLen int
+ errh ErrorHandler
+ donec chan struct{}
+ sls []matchersListener
+ readTimeout time.Duration
+}
+
+func matchersToMatchWriters(matchers []Matcher) []MatchWriter {
+ mws := make([]MatchWriter, 0, len(matchers))
+ for _, m := range matchers {
+ mws = append(mws, func(w io.Writer, r io.Reader) bool {
+ return m(r)
+ })
+ }
+ return mws
+}
+
+func (m *cMux) Match(matchers ...Matcher) net.Listener {
+ mws := matchersToMatchWriters(matchers)
+ return m.MatchWithWriters(mws...)
+}
+
+func (m *cMux) MatchWithWriters(matchers ...MatchWriter) net.Listener {
+ ml := muxListener{
+ Listener: m.root,
+ connc: make(chan net.Conn, m.bufLen),
+ }
+ m.sls = append(m.sls, matchersListener{ss: matchers, l: ml})
+ return ml
+}
+
+func (m *cMux) SetReadTimeout(t time.Duration) {
+ m.readTimeout = t
+}
+
+func (m *cMux) Serve() error {
+ var wg sync.WaitGroup
+
+ defer func() {
+ close(m.donec)
+ wg.Wait()
+
+ for _, sl := range m.sls {
+ close(sl.l.connc)
+ // Drain the connections enqueued for the listener.
+ for c := range sl.l.connc {
+ _ = c.Close()
+ }
+ }
+ }()
+
+ for {
+ c, err := m.root.Accept()
+ if err != nil {
+ if !m.handleErr(err) {
+ return err
+ }
+ continue
+ }
+
+ wg.Add(1)
+ go m.serve(c, m.donec, &wg)
+ }
+}
+
+func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) {
+ defer wg.Done()
+
+ muc := newMuxConn(c)
+ if m.readTimeout > noTimeout {
+ _ = c.SetReadDeadline(time.Now().Add(m.readTimeout))
+ }
+ for _, sl := range m.sls {
+ for _, s := range sl.ss {
+ matched := s(muc.Conn, muc.startSniffing())
+ if matched {
+ muc.doneSniffing()
+ if m.readTimeout > noTimeout {
+ _ = c.SetReadDeadline(time.Time{})
+ }
+ select {
+ case sl.l.connc <- muc:
+ case <-donec:
+ _ = c.Close()
+ }
+ return
+ }
+ }
+ }
+
+ _ = c.Close()
+ err := ErrNotMatched{c: c}
+ if !m.handleErr(err) {
+ _ = m.root.Close()
+ }
+}
+
+func (m *cMux) HandleError(h ErrorHandler) {
+ m.errh = h
+}
+
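+// handleErr reports whether serving should continue: the registered
+// ErrorHandler must return true and the error must be a temporary net.Error.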
+func (m *cMux) handleErr(err error) bool {
+ if !m.errh(err) {
+ return false
+ }
+
+ if ne, ok := err.(net.Error); ok {
+ return ne.Temporary()
+ }
+
+ return false
+}
+
+type muxListener struct {
+ net.Listener
+ connc chan net.Conn
+}
+
+func (l muxListener) Accept() (net.Conn, error) {
+ c, ok := <-l.connc
+ if !ok {
+ return nil, ErrListenerClosed
+ }
+ return c, nil
+}
+
+// MuxConn wraps a net.Conn and provides transparent sniffing of connection data.
+type MuxConn struct {
+ net.Conn
+ buf bufferedReader
+}
+
+func newMuxConn(c net.Conn) *MuxConn {
+ return &MuxConn{
+ Conn: c,
+ buf: bufferedReader{source: c},
+ }
+}
+
+// From the io.Reader documentation:
+//
+// When Read encounters an error or end-of-file condition after
+// successfully reading n > 0 bytes, it returns the number of
+// bytes read. It may return the (non-nil) error from the same call
+// or return the error (and n == 0) from a subsequent call.
+// An instance of this general case is that a Reader returning
+// a non-zero number of bytes at the end of the input stream may
+// return either err == EOF or err == nil. The next Read should
+// return 0, EOF.
+func (m *MuxConn) Read(p []byte) (int, error) {
+ return m.buf.Read(p)
+}
+
+func (m *MuxConn) startSniffing() io.Reader {
+ m.buf.reset(true)
+ return &m.buf
+}
+
+func (m *MuxConn) doneSniffing() {
+ m.buf.reset(false)
+}
diff --git a/vendor/github.com/soheilhy/cmux/doc.go b/vendor/github.com/soheilhy/cmux/doc.go
new file mode 100644
index 000000000..aaa8f3158
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+// Package cmux is a library to multiplex network connections based on
+// their payload. Using cmux, you can serve different protocols from the
+// same listener.
+package cmux
diff --git a/vendor/github.com/soheilhy/cmux/matchers.go b/vendor/github.com/soheilhy/cmux/matchers.go
new file mode 100644
index 000000000..652fd8691
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/matchers.go
@@ -0,0 +1,262 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "bufio"
+ "crypto/tls"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+)
+
+// Any is a Matcher that matches any connection.
+func Any() Matcher {
+ return func(r io.Reader) bool { return true }
+}
+
+// PrefixMatcher returns a matcher that matches a connection if it
+// starts with any of the strings in strs.
+func PrefixMatcher(strs ...string) Matcher {
+ pt := newPatriciaTreeString(strs...)
+ return pt.matchPrefix
+}
+
+func prefixByteMatcher(list ...[]byte) Matcher {
+ pt := newPatriciaTree(list...)
+ return pt.matchPrefix
+}
+
+var defaultHTTPMethods = []string{
+ "OPTIONS",
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "DELETE",
+ "TRACE",
+ "CONNECT",
+}
+
+// HTTP1Fast only matches the methods in the HTTP request.
+//
+// This matcher is very optimistic: if it returns true, it does not mean that
+// the request is a valid HTTP request. If you want a correct but slower HTTP1
+// matcher, use HTTP1 instead.
+func HTTP1Fast(extMethods ...string) Matcher {
+ return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
+}
+
+// TLS matches HTTPS requests.
+//
+// By default, any TLS handshake packet is matched. An optional whitelist
+// of versions can be passed in to restrict the matcher, for example:
+// TLS(tls.VersionTLS11, tls.VersionTLS12)
+func TLS(versions ...int) Matcher {
+ if len(versions) == 0 {
+ versions = []int{
+ tls.VersionSSL30,
+ tls.VersionTLS10,
+ tls.VersionTLS11,
+ tls.VersionTLS12,
+ }
+ }
+ prefixes := [][]byte{}
+ for _, v := range versions {
+ prefixes = append(prefixes, []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)})
+ }
+ return prefixByteMatcher(prefixes...)
+}
+
+const maxHTTPRead = 4096
+
+// HTTP1 parses the first line or up to 4096 bytes of the request to see if
+// the connection contains an HTTP request.
+func HTTP1() Matcher {
+ return func(r io.Reader) bool {
+ br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
+ l, part, err := br.ReadLine()
+ if err != nil || part {
+ return false
+ }
+
+ _, _, proto, ok := parseRequestLine(string(l))
+ if !ok {
+ return false
+ }
+
+ v, _, ok := http.ParseHTTPVersion(proto)
+ return ok && v == 1
+ }
+}
+
+// grabbed from net/http.
+func parseRequestLine(line string) (method, uri, proto string, ok bool) {
+ s1 := strings.Index(line, " ")
+ s2 := strings.Index(line[s1+1:], " ")
+ if s1 < 0 || s2 < 0 {
+ return
+ }
+ s2 += s1 + 1
+ return line[:s1], line[s1+1 : s2], line[s2+1:], true
+}
+
+// HTTP2 parses the frame header of the first frame to detect whether the
+// connection is an HTTP2 connection.
+func HTTP2() Matcher {
+ return hasHTTP2Preface
+}
+
+// HTTP1HeaderField returns a matcher matching the header fields of the first
+// request of an HTTP 1 connection.
+func HTTP1HeaderField(name, value string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP1Field(r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP1HeaderFieldPrefix returns a matcher matching the header fields of the
+// first request of an HTTP 1 connection. If the header with key name has a
+// value prefixed with valuePrefix, this will match.
+func HTTP1HeaderFieldPrefix(name, valuePrefix string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP1Field(r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
+ }
+}
+
+// HTTP2HeaderField returns a matcher matching the header fields of the first
+// headers frame.
+func HTTP2HeaderField(name, value string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP2HeaderFieldPrefix returns a matcher matching the header fields of the
+// first headers frame. If the header with key name has a value prefixed with
+// valuePrefix, this will match.
+func HTTP2HeaderFieldPrefix(name, valuePrefix string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
+ }
+}
+
+// HTTP2MatchHeaderFieldSendSettings matches the header field and writes the
+// settings to the server. Prefer HTTP2HeaderField over this one if the client
+// does not block on receiving a SETTINGS frame.
+func HTTP2MatchHeaderFieldSendSettings(name, value string) MatchWriter {
+ return func(w io.Writer, r io.Reader) bool {
+ return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP2MatchHeaderFieldPrefixSendSettings matches the header field prefix
+// and writes the settings to the server. Prefer HTTP2HeaderFieldPrefix over
+// this one if the client does not block on receiving a SETTINGS frame.
+func HTTP2MatchHeaderFieldPrefixSendSettings(name, valuePrefix string) MatchWriter {
+ return func(w io.Writer, r io.Reader) bool {
+ return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
+ }
+}
+
+func hasHTTP2Preface(r io.Reader) bool {
+ var b [len(http2.ClientPreface)]byte
+ last := 0
+
+ for {
+ n, err := r.Read(b[last:])
+ if err != nil {
+ return false
+ }
+
+ last += n
+ eq := string(b[:last]) == http2.ClientPreface[:last]
+ if last == len(http2.ClientPreface) {
+ return eq
+ }
+ if !eq {
+ return false
+ }
+ }
+}
+
+func matchHTTP1Field(r io.Reader, name string, matches func(string) bool) (matched bool) {
+ req, err := http.ReadRequest(bufio.NewReader(r))
+ if err != nil {
+ return false
+ }
+
+ return matches(req.Header.Get(name))
+}
+
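+// matchHTTP2Field reads HTTP/2 frames from r until the header field name has
+// been decoded (or an error occurs), answering any SETTINGS frame by writing
+// our own settings to w; for the read-only matchers, w is ioutil.Discard.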
+func matchHTTP2Field(w io.Writer, r io.Reader, name string, matches func(string) bool) (matched bool) {
+ if !hasHTTP2Preface(r) {
+ return false
+ }
+
+ done := false
+ framer := http2.NewFramer(w, r)
+ hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
+ if hf.Name == name {
+ done = true
+ if matches(hf.Value) {
+ matched = true
+ }
+ }
+ })
+ for {
+ f, err := framer.ReadFrame()
+ if err != nil {
+ return false
+ }
+
+ switch f := f.(type) {
+ case *http2.SettingsFrame:
+ if err := framer.WriteSettings(); err != nil {
+ return false
+ }
+ case *http2.ContinuationFrame:
+ if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
+ return false
+ }
+ done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
+ case *http2.HeadersFrame:
+ if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
+ return false
+ }
+ done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
+ }
+
+ if done {
+ return matched
+ }
+ }
+}
diff --git a/vendor/github.com/soheilhy/cmux/patricia.go b/vendor/github.com/soheilhy/cmux/patricia.go
new file mode 100644
index 000000000..c3e3d85bd
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/patricia.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "bytes"
+ "io"
+)
+
+// patriciaTree is a simple patricia tree that handles []byte instead of string
+// and cannot be changed after instantiation.
+type patriciaTree struct {
+ root *ptNode
+ maxDepth int // max depth of the tree.
+}
+
+func newPatriciaTree(bs ...[]byte) *patriciaTree {
+ max := 0
+ for _, b := range bs {
+ if max < len(b) {
+ max = len(b)
+ }
+ }
+ return &patriciaTree{
+ root: newNode(bs),
+ maxDepth: max + 1,
+ }
+}
+
+func newPatriciaTreeString(strs ...string) *patriciaTree {
+ b := make([][]byte, len(strs))
+ for i, s := range strs {
+ b[i] = []byte(s)
+ }
+ return newPatriciaTree(b...)
+}
+
+func (t *patriciaTree) matchPrefix(r io.Reader) bool {
+ buf := make([]byte, t.maxDepth)
+ n, _ := io.ReadFull(r, buf)
+ return t.root.match(buf[:n], true)
+}
+
+func (t *patriciaTree) match(r io.Reader) bool {
+ buf := make([]byte, t.maxDepth)
+ n, _ := io.ReadFull(r, buf)
+ return t.root.match(buf[:n], false)
+}
+
+type ptNode struct {
+ prefix []byte
+ next map[byte]*ptNode
+ terminal bool
+}
+
+func newNode(strs [][]byte) *ptNode {
+ if len(strs) == 0 {
+ return &ptNode{
+ prefix: []byte{},
+ terminal: true,
+ }
+ }
+
+ if len(strs) == 1 {
+ return &ptNode{
+ prefix: strs[0],
+ terminal: true,
+ }
+ }
+
+ p, strs := splitPrefix(strs)
+ n := &ptNode{
+ prefix: p,
+ }
+
+ nexts := make(map[byte][][]byte)
+ for _, s := range strs {
+ if len(s) == 0 {
+ n.terminal = true
+ continue
+ }
+ nexts[s[0]] = append(nexts[s[0]], s[1:])
+ }
+
+ n.next = make(map[byte]*ptNode)
+ for first, rests := range nexts {
+ n.next[first] = newNode(rests)
+ }
+
+ return n
+}
+
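+// splitPrefix computes the longest common prefix of bss and returns it along
+// with each input with that prefix stripped.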
+func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) {
+ if len(bss) == 0 || len(bss[0]) == 0 {
+ return prefix, bss
+ }
+
+ if len(bss) == 1 {
+ return bss[0], [][]byte{{}}
+ }
+
+ for i := 0; ; i++ {
+ var cur byte
+ eq := true
+ for j, b := range bss {
+ if len(b) <= i {
+ eq = false
+ break
+ }
+
+ if j == 0 {
+ cur = b[i]
+ continue
+ }
+
+ if cur != b[i] {
+ eq = false
+ break
+ }
+ }
+
+ if !eq {
+ break
+ }
+
+ prefix = append(prefix, cur)
+ }
+
+ rest = make([][]byte, 0, len(bss))
+ for _, b := range bss {
+ rest = append(rest, b[len(prefix):])
+ }
+
+ return prefix, rest
+}
+
+func (n *ptNode) match(b []byte, prefix bool) bool {
+ l := len(n.prefix)
+ if l > 0 {
+ if l > len(b) {
+ l = len(b)
+ }
+ if !bytes.Equal(b[:l], n.prefix) {
+ return false
+ }
+ }
+
+ if n.terminal && (prefix || len(n.prefix) == len(b)) {
+ return true
+ }
+
+ if l >= len(b) {
+ return false
+ }
+
+ nextN, ok := n.next[b[l]]
+ if !ok {
+ return false
+ }
+
+ if l == len(b) {
+ b = b[l:l]
+ } else {
+ b = b[l+1:]
+ }
+ return nextN.match(b, prefix)
+}
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 000000000..63ed1cfea
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 000000000..eefb46dec
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,277 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a flag if you
+have a FlagSet but find it difficult to keep up with all of the flag pointers
+in your code. If you have a pflag.FlagSet with a flag called 'flagname' of
+type int you can use GetInt() to get the int value. But notice that 'flagname'
+must exist and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
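+
+As a minimal sketch, the remaining arguments can be visited like this:
+
+``` go
+flag.Parse()
+for i := 0; i < flag.NArg(); i++ {
+	fmt.Println("argument", i, "is", flag.Arg(i))
+}
+```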
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
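+
+For example, given the shorthand flags defined above, the following
+invocations (illustrative only) are equivalent:
+
+```
+myprog -b -f 1234
+myprog -bf 1234
+```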
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
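+
+A minimal sketch of an independent flag set for a subcommand (the names here
+are illustrative, not part of the package):
+
+``` go
+fs := flag.NewFlagSet("serve", flag.ExitOnError)
+port := fs.IntP("port", "p", 8080, "port to listen on")
+fs.Parse(os.Args[2:])
+fmt.Println("listening on port", *port)
+```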
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or a flag with a default value.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here, and it should not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal; however, it will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+ goflag "flag"
+ flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+ flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+ flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 000000000..c4c5c0bfd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name.
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 000000000..5af02f1a7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts the comma-separated string argument and assigns it as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
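
Because Set appends once the flag has already been changed, a []bool flag accepts one comma-separated list, repeated occurrences, or a mix. A short sketch with a hypothetical --toggle flag:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        toggles := fs.BoolSlice("toggle", []bool{}, "list of booleans")

        // The second occurrence appends rather than replaces.
        if err := fs.Parse([]string{"--toggle=true,false", "--toggle=1"}); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(*toggles) // [true false true]
    }
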
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 000000000..d22be41f2
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ // -1 means that no specific value was passed, so increment
+ if v == -1 {
+ *i = countValue(*i + 1)
+ } else {
+ *i = countValue(v)
+ }
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name.
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "-1"
+}
+
+// CountVar is like FlagSet.CountVar, only the flag is placed on the CommandLine instead of a given flag set.
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count is like FlagSet.Count, only the flag is placed on the CommandLine instead of a given flag set.
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
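
The NoOptDefVal of "-1" is what makes a count flag increment: a bare occurrence passes "-1" to Set, which adds one, while an explicit value overwrites the count. A sketch with an assumed -v shorthand:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        verbosity := fs.CountP("verbose", "v", "verbosity level")

        // Each bare -v passes the NoOptDefVal "-1", so the counter increments.
        _ = fs.Parse([]string{"-vvv"})
        fmt.Println(*verbosity) // 3

        // An explicit value replaces the count instead of incrementing it.
        _ = fs.Parse([]string{"--verbose=7"})
        fmt.Println(*verbosity) // 7
    }
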
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 000000000..e9debef88
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the time.Duration value of a flag with the given name.
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
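
durationConv defers entirely to time.ParseDuration, so a duration flag accepts any value that function does. A sketch with an assumed --timeout flag:

    package main

    import (
        "fmt"
        "time"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        timeout := fs.Duration("timeout", 30*time.Second, "request timeout")

        _ = fs.Parse([]string{"--timeout=1h30m"})
        fmt.Println(*timeout) // 1h30m0s
    }
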
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 000000000..746af6327
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1063 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an err from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ formal map[NormalizedName]*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ exitOnError bool // does the program exit if there's an error?
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // If the user set the value (rather than leaving it at the default)
+ NoOptDefVal string // default value (as text); used if the flag is on the command line without any options
+ Deprecated string // If this flag is deprecated, this string is the message suggesting what to use instead
+ Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
+ ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the message suggesting what to use instead
+ Annotations map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated and then when anything tries to
+// look up the flag that will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ for k, v := range f.formal {
+ delete(f.formal, k)
+ nname := f.normalizeFlagName(string(k))
+ f.formal[nname] = v
+ v.Name = string(nname)
+ }
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// that does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.formal) {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// defined that are not hidden or deprecated.
+func (f *FlagSet) HasAvailableFlags() bool {
+ for _, flag := range f.formal {
+ if !flag.Hidden && len(flag.Deprecated) == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.actual) {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// getFlagType returns the value of the named flag converted to the given type via convFunc.
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if len(usageMessage) == 0 {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if len(usageMessage) == 0 {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ err := flag.Value.Set(value)
+ if err != nil {
+ return err
+ }
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ flag.Changed = true
+ if len(flag.Deprecated) > 0 {
+ fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise.
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed.
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+ switch f.Value.(type) {
+ case boolFlag:
+ return f.DefValue == "false"
+ case *durationValue:
+ // Beginning in Go 1.7, duration zero values are "0s"
+ return f.DefValue == "0" || f.DefValue == "0s"
+ case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+ return f.DefValue == "0"
+ case *stringValue:
+ return f.DefValue == ""
+ case *ipValue, *ipMaskValue, *ipNetValue:
+ return f.DefValue == "<nil>"
+ case *intSliceValue, *stringSliceValue, *stringArrayValue:
+ return f.DefValue == "[]"
+ default:
+ switch f.Value.String() {
+ case "false":
+ return true
+ case "<nil>":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+ }
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+
+ name = flag.Value.Type()
+ switch name {
+ case "bool":
+ name = ""
+ case "float64":
+ name = "float"
+ case "int64":
+ name = "int"
+ case "uint64":
+ name = "uint"
+ }
+
+ return
+}
+
+// wrapN splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t")
+ if w <= 0 {
+ return s, ""
+ }
+
+ return s[:w], s[w+1:]
+}
+
+// wrap wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by the
+// caller). Pass `w` == 0 to do no wrapping.
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return s
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return s
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + l
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + t
+ }
+
+ return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet, wrapped to `cols` columns (0 for no
+// wrapping).
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+ x := new(bytes.Buffer)
+
+ lines := make([]string, 0, len(f.formal))
+
+ maxlen := 0
+ f.VisitAll(func(flag *Flag) {
+ if len(flag.Deprecated) > 0 || flag.Hidden {
+ return
+ }
+
+ line := ""
+ if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+ line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ line = fmt.Sprintf(" --%s", flag.Name)
+ }
+
+ varname, usage := UnquoteUsage(flag)
+ if len(varname) > 0 {
+ line += " " + varname
+ }
+ if len(flag.NoOptDefVal) > 0 {
+ switch flag.Value.Type() {
+ case "string":
+ line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+ case "bool":
+ if flag.NoOptDefVal != "true" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ default:
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ }
+
+ // This special character will be replaced with spacing once the
+ // correct alignment is calculated
+ line += "\x00"
+ if len(line) > maxlen {
+ maxlen = len(line)
+ }
+
+ line += usage
+ if !flag.defaultIsZeroValue() {
+ if flag.Value.Type() == "string" {
+ line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
+ } else {
+ line += fmt.Sprintf(" (default %s)", flag.DefValue)
+ }
+ }
+
+ lines = append(lines, line)
+ })
+
+ for _, line := range lines {
+ sidx := strings.Index(line, "\x00")
+ spacing := strings.Repeat(" ", maxlen-sidx)
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(x, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ }
+
+ return x.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ // Call normalizeFlagName function only once
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadythere := f.formal[normalizedFlagName]
+ if alreadythere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+
+ if len(flag.Shorthand) == 0 {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ fmt.Fprintf(f.out(), "%s shorthand is more than one ASCII character: %s\n", f.name, flag.Shorthand)
+ panic("shorthand is more than one character")
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ old, alreadythere := f.shorthands[c]
+ if alreadythere {
+ fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
+ panic("shorthand redefinition")
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f,
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
+ if err := flag.Value.Set(value); err != nil {
+ return f.failf("invalid argument %q for %s: %v", value, origArg, err)
+ }
+ // mark as visited for Visit()
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[f.normalizeFlagName(flag.Name)] = flag
+ flag.Changed = true
+ if len(flag.Deprecated) > 0 {
+ fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
+ fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+ return nil
+}
+
+func containsShorthand(arg, shorthand string) bool {
+ // filter out flags --<flag_name>
+ if strings.HasPrefix(arg, "-") {
+ return false
+ }
+ arg = strings.SplitN(arg, "=", 2)[0]
+ return strings.Contains(arg, shorthand)
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, alreadythere := f.formal[f.normalizeFlagName(name)]
+ if !alreadythere {
+ if name == "help" { // special case for nice help message.
+ f.usage()
+ return a, ErrHelp
+ }
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if len(flag.NoOptDefVal) > 0 {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+ err = fn(flag, value, s)
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ if strings.HasPrefix(shorthands, "test.") {
+ return
+ }
+ outArgs = args
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, alreadythere := f.shorthands[c]
+ if !alreadythere {
+ if c == 'h' { // special case for nice help message.
+ f.usage()
+ err = ErrHelp
+ return
+ }
+ //TODO continue on error
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ value = shorthands[2:]
+ outShorts = ""
+ } else if len(flag.NoOptDefVal) > 0 {
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+ err = fn(flag, value, shorthands)
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args, fn)
+ } else {
+ args, err = f.parseShortArg(s, args, fn)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ assign := func(flag *Flag, value, origArg string) error {
+ return f.setFlag(flag, value, origArg)
+ }
+
+ err := f.parseArgs(arguments, assign)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+type parseFunc func(flag *Flag, value, origArg string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ assign := func(flag *Flag, value, origArg string) error {
+ return fn(flag, value)
+ }
+
+ err := f.parseArgs(arguments, assign)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ }
+ return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
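
Two behaviours defined in flag.go are easiest to see end to end: SetNormalizeFunc makes spelling variants resolve to one flag, and ArgsLenAtDash records where "--" separated flags from positional arguments. A sketch under assumed flag names:

    package main

    import (
        "fmt"
        "strings"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        fs.SetNormalizeFunc(func(f *flag.FlagSet, name string) flag.NormalizedName {
            // Treat '_' and '-' as equivalent in flag names.
            return flag.NormalizedName(strings.Replace(name, "_", "-", -1))
        })
        dry := fs.Bool("dry-run", false, "do not write anything")

        _ = fs.Parse([]string{"--dry_run", "build", "--", "-rf", "/tmp/x"})
        fmt.Println(*dry)               // true: --dry_run normalizes to --dry-run
        fmt.Println(fs.Args())          // [build -rf /tmp/x]
        fmt.Println(fs.ArgsLenAtDash()) // 1: only "build" preceded the "--"
    }
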
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 000000000..a243f81f7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name.
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 000000000..04b5492a7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name.
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 000000000..c4f47ebe5
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,101 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "reflect"
+ "strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag.
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`.
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ // Ex: if the golang flag was -v, allow both -v and --v to work
+ if len(flag.Name) == 1 {
+ flag.Shorthand = flag.Name
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+}
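
golangflag.go exists so programs can absorb flags that libraries registered with the standard library's flag package and parse everything through pflag once. A minimal sketch:

    package main

    import (
        goflag "flag"
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        // A flag defined against the standard library's flag package.
        legacy := goflag.Bool("legacy", false, "registered via the stdlib flag package")

        // Import every stdlib flag into pflag's CommandLine, then parse once.
        flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
        _ = flag.CommandLine.Parse([]string{"--legacy"})

        fmt.Println(*legacy) // true
    }
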
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 000000000..1474b89df
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name.
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
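
Every Get* accessor above runs through getFlagType, which checks Value.Type() before converting, so asking a flag for the wrong type is a runtime error rather than a silent zero. A sketch (GetString comes from the package's string flag file, which is outside this hunk):

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        fs.Int("retries", 3, "number of retries")

        n, err := fs.GetInt("retries")
        fmt.Println(n, err) // 3 <nil>

        _, err = fs.GetString("retries") // deliberately the wrong type
        fmt.Println(err) // trying to get string value of flag of type int
    }
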
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 000000000..9b95944f0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name.
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
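The Get* accessors recover a typed value from a FlagSet by name; getFlagType verifies that the flag was registered with the matching type string, so asking for the wrong type yields an error rather than a bad cast. A sketch, reusing the flag alias from the earlier example (names are illustrative):

	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Int32("port", 8080, "listen port")
	_ = fs.Parse([]string{"--port", "9090"})
	port, err := fs.GetInt32("port") // port == 9090, err == nil
	_, err = fs.GetString("port")    // err != nil: "port" is an int32 flag, not a string flag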
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 000000000..0026d781d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 000000000..4da92228e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 000000000..1e7c9edde
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
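Note how Set and the changed field interact above: the first occurrence of the flag replaces the default, and every later occurrence appends, with each occurrence itself split on commas. A sketch, using the same FlagSet setup as the earlier example:

	ids := fs.IntSlice("id", []int{7}, "numeric ids")
	_ = fs.Parse([]string{"--id", "1,2", "--id", "3"})
	// *ids == []int{1, 2, 3}: the default 7 was replaced, then 3 was appended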
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 000000000..3d414ba69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
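IP flags go through net.ParseIP, so IPv4 and IPv6 literals both work, and anything else causes Parse to fail with the error from Set. A sketch (assumes "net" is imported alongside the flag alias, with the same fs as above):

	addr := fs.IP("bind", net.ParseIP("127.0.0.1"), "address to bind to")
	_ = fs.Parse([]string{"--bind", "::1"})
	// *addr is now the IPv6 loopback; "--bind not-an-ip" would make Parse return an error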
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 000000000..7dd196fe3
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,148 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set converts the given comma-separated string of IP addresses and assigns the result as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values are appended.
+func (s *ipSliceValue) Set(val string) error {
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
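Set above strips quote characters and then reads the argument through the CSV helper defined later in this diff (readAsCSV in string_slice.go), so comma-separated lists and repeated flags both accumulate addresses. A sketch (same fs as above):

	var dns []net.IP
	fs.IPSliceVar(&dns, "dns", []net.IP{}, "DNS servers")
	_ = fs.Parse([]string{"--dns", "1.1.1.1,8.8.8.8", "--dns", "9.9.9.9"})
	// dns now holds 1.1.1.1, 8.8.8.8 and 9.9.9.9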
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 000000000..5bd44bd21
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
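ParseIPv4Mask therefore accepts the same mask in two spellings: the dotted-quad form and the 8-hex-digit form that net.IPMask.String() produces. A sketch:

	m1 := flag.ParseIPv4Mask("255.255.255.0")
	m2 := flag.ParseIPv4Mask("ffffff00")
	// m1.String() == m2.String() == "ffffff00"; malformed input yields nil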
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 000000000..e2c1b8bcd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet return the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
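An ipNet flag takes CIDR notation, and because net.ParseCIDR returns the network implied by the prefix, any host bits in the argument are masked away. A sketch (same fs as above):

	var subnet net.IPNet
	fs.IPNetVar(&subnet, "cidr", net.IPNet{}, "allowed subnet")
	_ = fs.Parse([]string{"--cidr", "10.1.2.3/8"})
	// subnet.String() == "10.0.0.0/8": the host bits of 10.1.2.3 were dropped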
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 000000000..04e0a26ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 000000000..276b7ed49
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,103 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+ ssv := new(stringArrayValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+ if !s.changed {
+ *s.value = []string{val}
+ s.changed = true
+ } else {
+ *s.value = append(*s.value, val)
+ }
+ return nil
+}
+
+func (s *stringArrayValue) Type() string {
+ return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause an array with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringArrayVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// Each value is stored verbatim; it is not split on commas.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Each value is stored verbatim; it is not split on commas.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Each value is stored verbatim; it is not split on commas.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringArray defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Each value is stored verbatim; it is not split on commas.
+func StringArray(name string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
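The distinguishing behavior of stringArray, as opposed to the stringSlice type defined next, is that each occurrence of the flag is kept verbatim. A sketch (same fs as above):

	labels := fs.StringArray("label", []string{}, "labels, one per flag")
	_ = fs.Parse([]string{"--label", "a,b", "--label", "c"})
	// *labels == []string{"a,b", "c"}: the comma is not a separator here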
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 000000000..05eee7543
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,129 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+ b := &bytes.Buffer{}
+ w := csv.NewWriter(b)
+ err := w.Write(vals)
+ if err != nil {
+ return "", err
+ }
+ w.Flush()
+ return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ v, err := readAsCSV(val)
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
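By contrast, stringSlice reads every occurrence as a CSV record and appends the fields, so the command line from the stringArray sketch splits on the comma:

	labels := fs.StringSlice("label", []string{}, "labels")
	_ = fs.Parse([]string{"--label", "a,b", "--label", "c"})
	// *labels == []string{"a", "b", "c"}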
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 000000000..dcbc2b758
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 000000000..7e9914edd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 000000000..d8024539b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 000000000..f62240f2c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 000000000..bb0e83c1f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 000000000..edd94c600
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,126 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
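
Note the replace-then-append behaviour in Set above: the first use of the flag replaces the default slice, and later uses append to it. A hedged sketch (the flag name `--port` is illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Given: --port=8080,8443 --port=9090
	// the default [80] is replaced by [8080 8443], then 9090 is appended,
	// yielding [8080 8443 9090].
	ports := pflag.UintSlice("port", []uint{80}, "ports to listen on")
	pflag.Parse()
	fmt.Println("ports:", *ports)
}
```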
diff --git a/vendor/github.com/syndtr/gocapability/LICENSE b/vendor/github.com/syndtr/gocapability/LICENSE
new file mode 100644
index 000000000..80dd96de7
--- /dev/null
+++ b/vendor/github.com/syndtr/gocapability/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/syndtr/gocapability/capability/capability.go b/vendor/github.com/syndtr/gocapability/capability/capability.go
new file mode 100644
index 000000000..c07c55794
--- /dev/null
+++ b/vendor/github.com/syndtr/gocapability/capability/capability.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package capability provides utilities for manipulating POSIX capabilities.
+package capability
+
+type Capabilities interface {
+ // Get checks whether a capability is present in the given
+ // capabilities set. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Get(which CapType, what Cap) bool
+
+ // Empty checks whether all capability bits of the given capabilities
+ // set are zero. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Empty(which CapType) bool
+
+ // Full checks whether all capability bits of the given capabilities
+ // set are one. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Full(which CapType) bool
+
+ // Set sets capabilities of the given capabilities sets. The
+ // 'which' value should be one or combination (OR'ed) of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Set(which CapType, caps ...Cap)
+
+ // Unset unsets capabilities of the given capabilities sets. The
+ // 'which' value should be one or combination (OR'ed) of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Unset(which CapType, caps ...Cap)
+
+ // Fill sets all bits of the given capabilities kind to one. The
+ // 'kind' value should be one or combination (OR'ed) of CAPS,
+ // BOUNDS or AMBS.
+ Fill(kind CapType)
+
+ // Clear sets all bits of the given capabilities kind to zero. The
+ // 'kind' value should be one or combination (OR'ed) of CAPS,
+ // BOUNDS or AMBS.
+ Clear(kind CapType)
+
+ // StringCap returns the current capabilities state of the given
+ // capabilities set as a string. The 'which' value should be one of
+ // EFFECTIVE, PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ StringCap(which CapType) string
+
+ // String returns the current capabilities state as a string.
+ String() string
+
+ // Load loads the actual capabilities value. This will overwrite all
+ // outstanding changes.
+ Load() error
+
+ // Apply applies the capabilities settings, so all changes take
+ // effect.
+ Apply(kind CapType) error
+}
+
+// NewPid creates a new initialized Capabilities object for the given pid
+// when it is nonzero, or for the current process if pid is 0.
+func NewPid(pid int) (Capabilities, error) {
+ return newPid(pid)
+}
+
+// NewFile creates a new initialized Capabilities object for the given named file.
+func NewFile(name string) (Capabilities, error) {
+ return newFile(name)
+}
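
A short, hedged example of the interface in use; the concrete CAP_* names come from enum.go later in this patch, and the whole thing is Linux-only:

```go
package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// pid 0 selects the current process.
	caps, err := capability.NewPid(0)
	if err != nil {
		panic(err)
	}
	if caps.Get(capability.EFFECTIVE, capability.CAP_NET_BIND_SERVICE) {
		fmt.Println("may bind to ports below 1024")
	}
}
```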
diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_linux.go b/vendor/github.com/syndtr/gocapability/capability/capability_linux.go
new file mode 100644
index 000000000..6d2135ac5
--- /dev/null
+++ b/vendor/github.com/syndtr/gocapability/capability/capability_linux.go
@@ -0,0 +1,650 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package capability
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "syscall"
+)
+
+var errUnknownVers = errors.New("unknown capability version")
+
+const (
+ linuxCapVer1 = 0x19980330
+ linuxCapVer2 = 0x20071026
+ linuxCapVer3 = 0x20080522
+)
+
+var (
+ capVers uint32
+ capLastCap Cap
+)
+
+func init() {
+ var hdr capHeader
+ capget(&hdr, nil)
+ capVers = hdr.version
+
+ if initLastCap() == nil {
+ CAP_LAST_CAP = capLastCap
+ if capLastCap > 31 {
+ capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1
+ } else {
+ capUpperMask = 0
+ }
+ }
+}
+
+func initLastCap() error {
+ if capLastCap != 0 {
+ return nil
+ }
+
+ f, err := os.Open("/proc/sys/kernel/cap_last_cap")
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ b := make([]byte, 11)
+ _, err = f.Read(b)
+ if err != nil {
+ return err
+ }
+
+ fmt.Sscanf(string(b), "%d", &capLastCap)
+
+ return nil
+}
+
+func mkStringCap(c Capabilities, which CapType) (ret string) {
+ for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ {
+ if !c.Get(which, i) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ ret += ", "
+ }
+ ret += i.String()
+ }
+ return
+}
+
+func mkString(c Capabilities, max CapType) (ret string) {
+ ret = "{"
+ for i := CapType(1); i <= max; i <<= 1 {
+ ret += " " + i.String() + "=\""
+ if c.Empty(i) {
+ ret += "empty"
+ } else if c.Full(i) {
+ ret += "full"
+ } else {
+ ret += c.StringCap(i)
+ }
+ ret += "\""
+ }
+ ret += " }"
+ return
+}
+
+func newPid(pid int) (c Capabilities, err error) {
+ switch capVers {
+ case linuxCapVer1:
+ p := new(capsV1)
+ p.hdr.version = capVers
+ p.hdr.pid = pid
+ c = p
+ case linuxCapVer2, linuxCapVer3:
+ p := new(capsV3)
+ p.hdr.version = capVers
+ p.hdr.pid = pid
+ c = p
+ default:
+ err = errUnknownVers
+ return
+ }
+ err = c.Load()
+ if err != nil {
+ c = nil
+ }
+ return
+}
+
+type capsV1 struct {
+ hdr capHeader
+ data capData
+}
+
+func (c *capsV1) Get(which CapType, what Cap) bool {
+ // v1 only covers capabilities 0-31.
+ if what > 31 {
+ return false
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data.effective != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data.permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data.inheritable != 0
+ }
+
+ return false
+}
+
+func (c *capsV1) getData(which CapType) (ret uint32) {
+ switch which {
+ case EFFECTIVE:
+ ret = c.data.effective
+ case PERMITTED:
+ ret = c.data.permitted
+ case INHERITABLE:
+ ret = c.data.inheritable
+ }
+ return
+}
+
+func (c *capsV1) Empty(which CapType) bool {
+ return c.getData(which) == 0
+}
+
+func (c *capsV1) Full(which CapType) bool {
+ return (c.getData(which) & 0x7fffffff) == 0x7fffffff
+}
+
+func (c *capsV1) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ if what > 31 {
+ continue
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data.permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data.inheritable |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsV1) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ if what > 31 {
+ continue
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data.permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data.inheritable &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsV1) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective = 0x7fffffff
+ c.data.permitted = 0x7fffffff
+ c.data.inheritable = 0
+ }
+}
+
+func (c *capsV1) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective = 0
+ c.data.permitted = 0
+ c.data.inheritable = 0
+ }
+}
+
+func (c *capsV1) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsV1) String() (ret string) {
+ return mkString(c, BOUNDING)
+}
+
+func (c *capsV1) Load() (err error) {
+ return capget(&c.hdr, &c.data)
+}
+
+func (c *capsV1) Apply(kind CapType) error {
+ if kind&CAPS == CAPS {
+ return capset(&c.hdr, &c.data)
+ }
+ return nil
+}
+
+type capsV3 struct {
+ hdr capHeader
+ data [2]capData
+ bounds [2]uint32
+ ambient [2]uint32
+}
+
+func (c *capsV3) Get(which CapType, what Cap) bool {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data[i].effective != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data[i].permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data[i].inheritable != 0
+ case BOUNDING:
+ return (1<<uint(what))&c.bounds[i] != 0
+ case AMBIENT:
+ return (1<<uint(what))&c.ambient[i] != 0
+ }
+
+ return false
+}
+
+func (c *capsV3) getData(which CapType, dest []uint32) {
+ switch which {
+ case EFFECTIVE:
+ dest[0] = c.data[0].effective
+ dest[1] = c.data[1].effective
+ case PERMITTED:
+ dest[0] = c.data[0].permitted
+ dest[1] = c.data[1].permitted
+ case INHERITABLE:
+ dest[0] = c.data[0].inheritable
+ dest[1] = c.data[1].inheritable
+ case BOUNDING:
+ dest[0] = c.bounds[0]
+ dest[1] = c.bounds[1]
+ case AMBIENT:
+ dest[0] = c.ambient[0]
+ dest[1] = c.ambient[1]
+ }
+}
+
+func (c *capsV3) Empty(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ return data[0] == 0 && data[1] == 0
+}
+
+func (c *capsV3) Full(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ if (data[0] & 0xffffffff) != 0xffffffff {
+ return false
+ }
+ return (data[1] & capUpperMask) == capUpperMask
+}
+
+func (c *capsV3) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data[i].effective |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data[i].permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data[i].inheritable |= 1 << uint(what)
+ }
+ if which&BOUNDING != 0 {
+ c.bounds[i] |= 1 << uint(what)
+ }
+ if which&AMBIENT != 0 {
+ c.ambient[i] |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsV3) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data[i].effective &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data[i].permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data[i].inheritable &= ^(1 << uint(what))
+ }
+ if which&BOUNDING != 0 {
+ c.bounds[i] &= ^(1 << uint(what))
+ }
+ if which&AMBIENT != 0 {
+ c.ambient[i] &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsV3) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data[0].effective = 0xffffffff
+ c.data[0].permitted = 0xffffffff
+ c.data[0].inheritable = 0
+ c.data[1].effective = 0xffffffff
+ c.data[1].permitted = 0xffffffff
+ c.data[1].inheritable = 0
+ }
+
+ if kind&BOUNDS == BOUNDS {
+ c.bounds[0] = 0xffffffff
+ c.bounds[1] = 0xffffffff
+ }
+ if kind&AMBS == AMBS {
+ c.ambient[0] = 0xffffffff
+ c.ambient[1] = 0xffffffff
+ }
+}
+
+func (c *capsV3) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data[0].effective = 0
+ c.data[0].permitted = 0
+ c.data[0].inheritable = 0
+ c.data[1].effective = 0
+ c.data[1].permitted = 0
+ c.data[1].inheritable = 0
+ }
+
+ if kind&BOUNDS == BOUNDS {
+ c.bounds[0] = 0
+ c.bounds[1] = 0
+ }
+ if kind&AMBS == AMBS {
+ c.ambient[0] = 0
+ c.ambient[1] = 0
+ }
+}
+
+func (c *capsV3) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsV3) String() (ret string) {
+ return mkString(c, BOUNDING)
+}
+
+func (c *capsV3) Load() (err error) {
+ err = capget(&c.hdr, &c.data[0])
+ if err != nil {
+ return
+ }
+
+ statusPath := "/proc/self/status"
+ if c.hdr.pid != 0 {
+ statusPath = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
+ }
+
+ f, err := os.Open(statusPath)
+ if err != nil {
+ return
+ }
+ b := bufio.NewReader(f)
+ for {
+ line, e := b.ReadString('\n')
+ if e != nil {
+ if e != io.EOF {
+ err = e
+ }
+ break
+ }
+ if strings.HasPrefix(line, "CapB") {
+ fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
+ break
+ }
+ if strings.HasPrefix(line, "CapA") {
+ fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0])
+ break
+ }
+ }
+ f.Close()
+
+ return
+}
+
+func (c *capsV3) Apply(kind CapType) (err error) {
+ if kind&BOUNDS == BOUNDS {
+ var data [2]capData
+ err = capget(&c.hdr, &data[0])
+ if err != nil {
+ return
+ }
+ if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
+ for i := Cap(0); i <= CAP_LAST_CAP; i++ {
+ if c.Get(BOUNDING, i) {
+ continue
+ }
+ err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
+ if err != nil {
+ // Ignore EINVAL since the capability may not be supported in this system.
+ if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
+ err = nil
+ continue
+ }
+ return
+ }
+ }
+ }
+ }
+
+ if kind&CAPS == CAPS {
+ err = capset(&c.hdr, &c.data[0])
+ if err != nil {
+ return
+ }
+ }
+
+ if kind&AMBS == AMBS {
+ for i := Cap(0); i <= CAP_LAST_CAP; i++ {
+ action := pr_CAP_AMBIENT_LOWER
+ if c.Get(AMBIENT, i) {
+ action = pr_CAP_AMBIENT_RAISE
+ }
+ err = prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0)
+ if err != nil {
+ // Ignore EINVAL since ambient capabilities are not supported
+ // on kernels before 4.3.
+ if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
+ err = nil
+ continue
+ }
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func newFile(path string) (c Capabilities, err error) {
+ c = &capsFile{path: path}
+ err = c.Load()
+ if err != nil {
+ c = nil
+ }
+ return
+}
+
+type capsFile struct {
+ path string
+ data vfscapData
+}
+
+func (c *capsFile) Get(which CapType, what Cap) bool {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ return false
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data.effective[i] != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data.data[i].permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data.data[i].inheritable != 0
+ }
+
+ return false
+}
+
+func (c *capsFile) getData(which CapType, dest []uint32) {
+ switch which {
+ case EFFECTIVE:
+ dest[0] = c.data.effective[0]
+ dest[1] = c.data.effective[1]
+ case PERMITTED:
+ dest[0] = c.data.data[0].permitted
+ dest[1] = c.data.data[1].permitted
+ case INHERITABLE:
+ dest[0] = c.data.data[0].inheritable
+ dest[1] = c.data.data[1].inheritable
+ }
+}
+
+func (c *capsFile) Empty(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ return data[0] == 0 && data[1] == 0
+}
+
+func (c *capsFile) Full(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ if c.data.version == 0 {
+ return (data[0] & 0x7fffffff) == 0x7fffffff
+ }
+ if (data[0] & 0xffffffff) != 0xffffffff {
+ return false
+ }
+ return (data[1] & capUpperMask) == capUpperMask
+}
+
+func (c *capsFile) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ continue
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective[i] |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data.data[i].permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data.data[i].inheritable |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsFile) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ continue
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective[i] &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data.data[i].permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data.data[i].inheritable &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsFile) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective[0] = 0xffffffff
+ c.data.data[0].permitted = 0xffffffff
+ c.data.data[0].inheritable = 0
+ if c.data.version == 2 {
+ c.data.effective[1] = 0xffffffff
+ c.data.data[1].permitted = 0xffffffff
+ c.data.data[1].inheritable = 0
+ }
+ }
+}
+
+func (c *capsFile) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective[0] = 0
+ c.data.data[0].permitted = 0
+ c.data.data[0].inheritable = 0
+ if c.data.version == 2 {
+ c.data.effective[1] = 0
+ c.data.data[1].permitted = 0
+ c.data.data[1].inheritable = 0
+ }
+ }
+}
+
+func (c *capsFile) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsFile) String() (ret string) {
+ return mkString(c, INHERITABLE)
+}
+
+func (c *capsFile) Load() (err error) {
+ return getVfsCap(c.path, &c.data)
+}
+
+func (c *capsFile) Apply(kind CapType) (err error) {
+ if kind&CAPS == CAPS {
+ return setVfsCap(c.path, &c.data)
+ }
+ return
+}
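
To illustrate the Apply path above: removing a capability from the bounding set is a Load/Unset/Apply sequence (NewPid already calls Load). A sketch, assuming the process holds CAP_SETPCAP, which the kernel requires for PR_CAPBSET_DROP:

```go
package main

import "github.com/syndtr/gocapability/capability"

// dropBound removes one capability from the current process' bounding set.
// Linux-only; needs CAP_SETPCAP in the effective set.
func dropBound(c capability.Cap) error {
	caps, err := capability.NewPid(0) // Load happens inside NewPid
	if err != nil {
		return err
	}
	caps.Unset(capability.BOUNDING, c)
	return caps.Apply(capability.BOUNDS)
}

func main() {
	if err := dropBound(capability.CAP_SYS_ADMIN); err != nil {
		panic(err)
	}
}
```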
diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_noop.go b/vendor/github.com/syndtr/gocapability/capability/capability_noop.go
new file mode 100644
index 000000000..9bb3070c5
--- /dev/null
+++ b/vendor/github.com/syndtr/gocapability/capability/capability_noop.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !linux
+
+package capability
+
+import "errors"
+
+func newPid(pid int) (Capabilities, error) {
+ return nil, errors.New("not supported")
+}
+
+func newFile(path string) (Capabilities, error) {
+ return nil, errors.New("not supported")
+}
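
Because of the build tag above, non-Linux builds get constructors that always fail, so portable callers should treat a constructor error as "capabilities unsupported". A minimal sketch:

```go
package main

import (
	"log"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	caps, err := capability.NewPid(0)
	if err != nil {
		// Hit on non-Linux builds (and on unknown kernel cap versions);
		// degrade gracefully rather than crashing.
		log.Println("capabilities unsupported:", err)
		return
	}
	log.Println(caps.String())
}
```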
diff --git a/vendor/github.com/syndtr/gocapability/capability/enum.go b/vendor/github.com/syndtr/gocapability/capability/enum.go
new file mode 100644
index 000000000..693817317
--- /dev/null
+++ b/vendor/github.com/syndtr/gocapability/capability/enum.go
@@ -0,0 +1,268 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package capability
+
+type CapType uint
+
+func (c CapType) String() string {
+ switch c {
+ case EFFECTIVE:
+ return "effective"
+ case PERMITTED:
+ return "permitted"
+ case INHERITABLE:
+ return "inheritable"
+ case BOUNDING:
+ return "bounding"
+ case CAPS:
+ return "caps"
+ case AMBIENT:
+ return "ambient"
+ }
+ return "unknown"
+}
+
+const (
+ EFFECTIVE CapType = 1 << iota
+ PERMITTED
+ INHERITABLE
+ BOUNDING
+ AMBIENT
+
+ CAPS = EFFECTIVE | PERMITTED | INHERITABLE
+ BOUNDS = BOUNDING
+ AMBS = AMBIENT
+)
+
+//go:generate go run enumgen/gen.go
+type Cap int
+
+// POSIX-draft defined capabilities.
+const (
+ // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
+ // overrides the restriction of changing file ownership and group
+ // ownership.
+ CAP_CHOWN = Cap(0)
+
+ // Override all DAC access, including ACL execute access if
+ // [_POSIX_ACL] is defined. Excluding DAC access covered by
+ // CAP_LINUX_IMMUTABLE.
+ CAP_DAC_OVERRIDE = Cap(1)
+
+ // Overrides all DAC restrictions regarding read and search on files
+ // and directories, including ACL restrictions if [_POSIX_ACL] is
+ // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE.
+ CAP_DAC_READ_SEARCH = Cap(2)
+
+ // Overrides all restrictions about allowed operations on files, where
+ // file owner ID must be equal to the user ID, except where CAP_FSETID
+ // is applicable. It doesn't override MAC and DAC restrictions.
+ CAP_FOWNER = Cap(3)
+
+ // Overrides the following restrictions that the effective user ID
+ // shall match the file owner ID when setting the S_ISUID and S_ISGID
+ // bits on that file; that the effective group ID (or one of the
+ // supplementary group IDs) shall match the file owner ID when setting
+ // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are
+ // cleared on successful return from chown(2) (not implemented).
+ CAP_FSETID = Cap(4)
+
+ // Overrides the restriction that the real or effective user ID of a
+ // process sending a signal must match the real or effective user ID
+ // of the process receiving the signal.
+ CAP_KILL = Cap(5)
+
+ // Allows setgid(2) manipulation
+ // Allows setgroups(2)
+ // Allows forged gids on socket credentials passing.
+ CAP_SETGID = Cap(6)
+
+ // Allows set*uid(2) manipulation (including fsuid).
+ // Allows forged pids on socket credentials passing.
+ CAP_SETUID = Cap(7)
+
+ // Linux-specific capabilities
+
+ // Without VFS support for capabilities:
+ // Transfer any capability in your permitted set to any pid,
+ // remove any capability in your permitted set from any pid
+ // With VFS support for capabilities (neither of above, but)
+ // Add any capability from current's capability bounding set
+ // to the current process' inheritable set
+ // Allow taking bits out of capability bounding set
+ // Allow modification of the securebits for a process
+ CAP_SETPCAP = Cap(8)
+
+ // Allow modification of S_IMMUTABLE and S_APPEND file attributes
+ CAP_LINUX_IMMUTABLE = Cap(9)
+
+ // Allows binding to TCP/UDP sockets below 1024
+ // Allows binding to ATM VCIs below 32
+ CAP_NET_BIND_SERVICE = Cap(10)
+
+ // Allow broadcasting, listen to multicast
+ CAP_NET_BROADCAST = Cap(11)
+
+ // Allow interface configuration
+ // Allow administration of IP firewall, masquerading and accounting
+ // Allow setting debug option on sockets
+ // Allow modification of routing tables
+ // Allow setting arbitrary process / process group ownership on
+ // sockets
+ // Allow binding to any address for transparent proxying (also via NET_RAW)
+ // Allow setting TOS (type of service)
+ // Allow setting promiscuous mode
+ // Allow clearing driver statistics
+ // Allow multicasting
+ // Allow read/write of device-specific registers
+ // Allow activation of ATM control sockets
+ CAP_NET_ADMIN = Cap(12)
+
+ // Allow use of RAW sockets
+ // Allow use of PACKET sockets
+ // Allow binding to any address for transparent proxying (also via NET_ADMIN)
+ CAP_NET_RAW = Cap(13)
+
+ // Allow locking of shared memory segments
+ // Allow mlock and mlockall (which doesn't really have anything to do
+ // with IPC)
+ CAP_IPC_LOCK = Cap(14)
+
+ // Override IPC ownership checks
+ CAP_IPC_OWNER = Cap(15)
+
+ // Insert and remove kernel modules - modify kernel without limit
+ CAP_SYS_MODULE = Cap(16)
+
+ // Allow ioperm/iopl access
+ // Allow sending USB messages to any device via /proc/bus/usb
+ CAP_SYS_RAWIO = Cap(17)
+
+ // Allow use of chroot()
+ CAP_SYS_CHROOT = Cap(18)
+
+ // Allow ptrace() of any process
+ CAP_SYS_PTRACE = Cap(19)
+
+ // Allow configuration of process accounting
+ CAP_SYS_PACCT = Cap(20)
+
+ // Allow configuration of the secure attention key
+ // Allow administration of the random device
+ // Allow examination and configuration of disk quotas
+ // Allow setting the domainname
+ // Allow setting the hostname
+ // Allow calling bdflush()
+ // Allow mount() and umount(), setting up new smb connection
+ // Allow some autofs root ioctls
+ // Allow nfsservctl
+ // Allow VM86_REQUEST_IRQ
+ // Allow to read/write pci config on alpha
+ // Allow irix_prctl on mips (setstacksize)
+ // Allow flushing all cache on m68k (sys_cacheflush)
+ // Allow removing semaphores
+ // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
+ // and shared memory
+ // Allow locking/unlocking of shared memory segment
+ // Allow turning swap on/off
+ // Allow forged pids on socket credentials passing
+ // Allow setting readahead and flushing buffers on block devices
+ // Allow setting geometry in floppy driver
+ // Allow turning DMA on/off in xd driver
+ // Allow administration of md devices (mostly the above, but some
+ // extra ioctls)
+ // Allow tuning the ide driver
+ // Allow access to the nvram device
+ // Allow administration of apm_bios, serial and bttv (TV) device
+ // Allow manufacturer commands in isdn CAPI support driver
+ // Allow reading non-standardized portions of pci configuration space
+ // Allow DDI debug ioctl on sbpcd driver
+ // Allow setting up serial ports
+ // Allow sending raw qic-117 commands
+ // Allow enabling/disabling tagged queuing on SCSI controllers and sending
+ // arbitrary SCSI commands
+ // Allow setting encryption key on loopback filesystem
+ // Allow setting zone reclaim policy
+ CAP_SYS_ADMIN = Cap(21)
+
+ // Allow use of reboot()
+ CAP_SYS_BOOT = Cap(22)
+
+ // Allow raising priority and setting priority on other (different
+ // UID) processes
+ // Allow use of FIFO and round-robin (realtime) scheduling on own
+ // processes and setting the scheduling algorithm used by another
+ // process.
+ // Allow setting cpu affinity on other processes
+ CAP_SYS_NICE = Cap(23)
+
+ // Override resource limits. Set resource limits.
+ // Override quota limits.
+ // Override reserved space on ext2 filesystem
+ // Modify data journaling mode on ext3 filesystem (uses journaling
+ // resources)
+ // NOTE: ext2 honors fsuid when checking for resource overrides, so
+ // you can override using fsuid too
+ // Override size restrictions on IPC message queues
+ // Allow more than 64hz interrupts from the real-time clock
+ // Override max number of consoles on console allocation
+ // Override max number of keymaps
+ CAP_SYS_RESOURCE = Cap(24)
+
+ // Allow manipulation of system clock
+ // Allow irix_stime on mips
+ // Allow setting the real-time clock
+ CAP_SYS_TIME = Cap(25)
+
+ // Allow configuration of tty devices
+ // Allow vhangup() of tty
+ CAP_SYS_TTY_CONFIG = Cap(26)
+
+ // Allow the privileged aspects of mknod()
+ CAP_MKNOD = Cap(27)
+
+ // Allow taking of leases on files
+ CAP_LEASE = Cap(28)
+
+ CAP_AUDIT_WRITE = Cap(29)
+ CAP_AUDIT_CONTROL = Cap(30)
+ CAP_SETFCAP = Cap(31)
+
+ // Override MAC access.
+ // The base kernel enforces no MAC policy.
+ // An LSM may enforce a MAC policy, and if it does and it chooses
+ // to implement capability based overrides of that policy, this is
+ // the capability it should use to do so.
+ CAP_MAC_OVERRIDE = Cap(32)
+
+ // Allow MAC configuration or state changes.
+ // The base kernel requires no MAC configuration.
+ // An LSM may enforce a MAC policy, and if it does and it chooses
+ // to implement capability based checks on modifications to that
+ // policy or the data required to maintain it, this is the
+ // capability it should use to do so.
+ CAP_MAC_ADMIN = Cap(33)
+
+ // Allow configuring the kernel's syslog (printk behaviour)
+ CAP_SYSLOG = Cap(34)
+
+ // Allow triggering something that will wake the system
+ CAP_WAKE_ALARM = Cap(35)
+
+ // Allow preventing system suspends
+ CAP_BLOCK_SUSPEND = Cap(36)
+
+ // Allow reading audit messages from the kernel
+ CAP_AUDIT_READ = Cap(37)
+)
+
+var (
+ // Highest valid capability of the running kernel.
+ CAP_LAST_CAP = Cap(63)
+
+ capUpperMask = ^uint32(0)
+)
diff --git a/vendor/github.com/syndtr/gocapability/capability/enum_gen.go b/vendor/github.com/syndtr/gocapability/capability/enum_gen.go
new file mode 100644
index 000000000..b9e6d2d5e
--- /dev/null
+++ b/vendor/github.com/syndtr/gocapability/capability/enum_gen.go
@@ -0,0 +1,129 @@
+// generated file; DO NOT EDIT - use go generate in directory with source
+
+package capability
+
+func (c Cap) String() string {
+ switch c {
+ case CAP_CHOWN:
+ return "chown"
+ case CAP_DAC_OVERRIDE:
+ return "dac_override"
+ case CAP_DAC_READ_SEARCH:
+ return "dac_read_search"
+ case CAP_FOWNER:
+ return "fowner"
+ case CAP_FSETID:
+ return "fsetid"
+ case CAP_KILL:
+ return "kill"
+ case CAP_SETGID:
+ return "setgid"
+ case CAP_SETUID:
+ return "setuid"
+ case CAP_SETPCAP:
+ return "setpcap"
+ case CAP_LINUX_IMMUTABLE:
+ return "linux_immutable"
+ case CAP_NET_BIND_SERVICE:
+ return "net_bind_service"
+ case CAP_NET_BROADCAST:
+ return "net_broadcast"
+ case CAP_NET_ADMIN:
+ return "net_admin"
+ case CAP_NET_RAW:
+ return "net_raw"
+ case CAP_IPC_LOCK:
+ return "ipc_lock"
+ case CAP_IPC_OWNER:
+ return "ipc_owner"
+ case CAP_SYS_MODULE:
+ return "sys_module"
+ case CAP_SYS_RAWIO:
+ return "sys_rawio"
+ case CAP_SYS_CHROOT:
+ return "sys_chroot"
+ case CAP_SYS_PTRACE:
+ return "sys_ptrace"
+ case CAP_SYS_PACCT:
+ return "sys_pacct"
+ case CAP_SYS_ADMIN:
+ return "sys_admin"
+ case CAP_SYS_BOOT:
+ return "sys_boot"
+ case CAP_SYS_NICE:
+ return "sys_nice"
+ case CAP_SYS_RESOURCE:
+ return "sys_resource"
+ case CAP_SYS_TIME:
+ return "sys_time"
+ case CAP_SYS_TTY_CONFIG:
+ return "sys_tty_config"
+ case CAP_MKNOD:
+ return "mknod"
+ case CAP_LEASE:
+ return "lease"
+ case CAP_AUDIT_WRITE:
+ return "audit_write"
+ case CAP_AUDIT_CONTROL:
+ return "audit_control"
+ case CAP_SETFCAP:
+ return "setfcap"
+ case CAP_MAC_OVERRIDE:
+ return "mac_override"
+ case CAP_MAC_ADMIN:
+ return "mac_admin"
+ case CAP_SYSLOG:
+ return "syslog"
+ case CAP_WAKE_ALARM:
+ return "wake_alarm"
+ case CAP_BLOCK_SUSPEND:
+ return "block_suspend"
+ case CAP_AUDIT_READ:
+ return "audit_read"
+ }
+ return "unknown"
+}
+
+// List returns the list of all supported capabilities.
+func List() []Cap {
+ return []Cap{
+ CAP_CHOWN,
+ CAP_DAC_OVERRIDE,
+ CAP_DAC_READ_SEARCH,
+ CAP_FOWNER,
+ CAP_FSETID,
+ CAP_KILL,
+ CAP_SETGID,
+ CAP_SETUID,
+ CAP_SETPCAP,
+ CAP_LINUX_IMMUTABLE,
+ CAP_NET_BIND_SERVICE,
+ CAP_NET_BROADCAST,
+ CAP_NET_ADMIN,
+ CAP_NET_RAW,
+ CAP_IPC_LOCK,
+ CAP_IPC_OWNER,
+ CAP_SYS_MODULE,
+ CAP_SYS_RAWIO,
+ CAP_SYS_CHROOT,
+ CAP_SYS_PTRACE,
+ CAP_SYS_PACCT,
+ CAP_SYS_ADMIN,
+ CAP_SYS_BOOT,
+ CAP_SYS_NICE,
+ CAP_SYS_RESOURCE,
+ CAP_SYS_TIME,
+ CAP_SYS_TTY_CONFIG,
+ CAP_MKNOD,
+ CAP_LEASE,
+ CAP_AUDIT_WRITE,
+ CAP_AUDIT_CONTROL,
+ CAP_SETFCAP,
+ CAP_MAC_OVERRIDE,
+ CAP_MAC_ADMIN,
+ CAP_SYSLOG,
+ CAP_WAKE_ALARM,
+ CAP_BLOCK_SUSPEND,
+ CAP_AUDIT_READ,
+ }
+}
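
List, together with CAP_LAST_CAP from enum.go, yields the names the running kernel actually supports; entries above CAP_LAST_CAP are known to the package but not to the kernel. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	for _, c := range capability.List() {
		if c > capability.CAP_LAST_CAP {
			continue // defined in the package, absent from this kernel
		}
		fmt.Println("cap_" + c.String())
	}
}
```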
diff --git a/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go b/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go
new file mode 100644
index 000000000..eb7170083
--- /dev/null
+++ b/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go
@@ -0,0 +1,154 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package capability
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type capHeader struct {
+ version uint32
+ pid int
+}
+
+type capData struct {
+ effective uint32
+ permitted uint32
+ inheritable uint32
+}
+
+func capget(hdr *capHeader, data *capData) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func capset(hdr *capHeader, data *capData) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// not yet in syscall
+const (
+ pr_CAP_AMBIENT = 47
+ pr_CAP_AMBIENT_IS_SET = uintptr(1)
+ pr_CAP_AMBIENT_RAISE = uintptr(2)
+ pr_CAP_AMBIENT_LOWER = uintptr(3)
+ pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4)
+)
+
+func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
+ _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+const (
+ vfsXattrName = "security.capability"
+
+ vfsCapVerMask = 0xff000000
+ vfsCapVer1 = 0x01000000
+ vfsCapVer2 = 0x02000000
+
+ vfsCapFlagMask = ^vfsCapVerMask
+ vfsCapFlageffective = 0x000001
+
+ vfscapDataSizeV1 = 4 * (1 + 2*1)
+ vfscapDataSizeV2 = 4 * (1 + 2*2)
+)
+
+type vfscapData struct {
+ magic uint32
+ data [2]struct {
+ permitted uint32
+ inheritable uint32
+ }
+ effective [2]uint32
+ version int8
+}
+
+var (
+ _vfsXattrName *byte
+)
+
+func init() {
+ _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName)
+}
+
+func getVfsCap(path string, dest *vfscapData) (err error) {
+ var _p0 *byte
+ _p0, err = syscall.BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
+ if e1 != 0 {
+ if e1 == syscall.ENODATA {
+ dest.version = 2
+ return
+ }
+ err = e1
+ }
+ switch dest.magic & vfsCapVerMask {
+ case vfsCapVer1:
+ dest.version = 1
+ if r0 != vfscapDataSizeV1 {
+ return syscall.EINVAL
+ }
+ dest.data[1].permitted = 0
+ dest.data[1].inheritable = 0
+ case vfsCapVer2:
+ dest.version = 2
+ if r0 != vfscapDataSizeV2 {
+ return syscall.EINVAL
+ }
+ default:
+ return syscall.EINVAL
+ }
+ if dest.magic&vfsCapFlageffective != 0 {
+ dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable
+ dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable
+ } else {
+ dest.effective[0] = 0
+ dest.effective[1] = 0
+ }
+ return
+}
+
+func setVfsCap(path string, data *vfscapData) (err error) {
+ var _p0 *byte
+ _p0, err = syscall.BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var size uintptr
+ if data.version == 1 {
+ data.magic = vfsCapVer1
+ size = vfscapDataSizeV1
+ } else if data.version == 2 {
+ data.magic = vfsCapVer2
+ if data.effective[0] != 0 || data.effective[1] != 0 {
+ data.magic |= vfsCapFlageffective
+ }
+ size = vfscapDataSizeV2
+ } else {
+ return syscall.EINVAL
+ }
+ _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
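
The getVfsCap/setVfsCap pair above is what backs NewFile: file capabilities live in the security.capability extended attribute. A hedged reading example; the path is illustrative and must point at a file that actually carries capabilities:

```go
package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// /usr/bin/ping often carries cap_net_raw on modern distributions,
	// but that is an assumption, not a guarantee.
	caps, err := capability.NewFile("/usr/bin/ping")
	if err != nil {
		panic(err)
	}
	fmt.Println(caps.StringCap(capability.PERMITTED))
}
```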
diff --git a/vendor/github.com/tchap/go-patricia/LICENSE b/vendor/github.com/tchap/go-patricia/LICENSE
new file mode 100644
index 000000000..e50d398e9
--- /dev/null
+++ b/vendor/github.com/tchap/go-patricia/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 The AUTHORS
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tchap/go-patricia/README.md b/vendor/github.com/tchap/go-patricia/README.md
new file mode 100644
index 000000000..d9cbf624f
--- /dev/null
+++ b/vendor/github.com/tchap/go-patricia/README.md
@@ -0,0 +1,123 @@
+# go-patricia #
+
+**Documentation**: [GoDoc](http://godoc.org/github.com/tchap/go-patricia/patricia)<br />
+**Build Status**: [![Build
+Status](https://drone.io/github.com/tchap/go-patricia/status.png)](https://drone.io/github.com/tchap/go-patricia/latest)<br />
+**Test Coverage**: [![Coverage
+Status](https://coveralls.io/repos/tchap/go-patricia/badge.png)](https://coveralls.io/r/tchap/go-patricia)
+
+## About ##
+
+A generic patricia trie (also called radix tree) implemented in Go (Golang).
+
+The patricia trie as implemented in this library enables fast visiting of items
+in some particular ways:
+
+1. visit all items saved in the tree,
+2. visit all items matching particular prefix (visit subtree), or
+3. given a string, visit all items matching some prefix of that string.
+
+`[]byte` type is used for keys, `interface{}` for values.
+
+`Trie` is not thread safe. Synchronize the access yourself.
+
+### State of the Project ###
+
+Apparently some people are using this, so the API should not change often.
+Any ideas on how to make the library better are still welcome.
+
+More (unit) testing would be cool as well...
+
+## Usage ##
+
+Import the package from GitHub first.
+
+```go
+import "github.com/tchap/go-patricia/patricia"
+```
+
+You can also use the gopkg.in import path:
+
+```go
+import "gopkg.in/tchap/go-patricia.v2/patricia"
+```
+
+Then you can start having fun.
+
+```go
+printItem := func(prefix patricia.Prefix, item patricia.Item) error {
+ fmt.Printf("%q: %v\n", prefix, item)
+ return nil
+}
+
+// Create a new default trie (using the default parameter values).
+trie := NewTrie()
+
+// Or create a custom trie, overriding the default parameters.
+trie = NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
+
+// Insert some items.
+trie.Insert(Prefix("Pepa Novak"), 1)
+trie.Insert(Prefix("Pepa Sindelar"), 2)
+trie.Insert(Prefix("Karel Macha"), 3)
+trie.Insert(Prefix("Karel Hynek Macha"), 4)
+
+// Just check if some things are present in the tree.
+key := Prefix("Pepa Novak")
+fmt.Printf("%q present? %v\n", key, trie.Match(key))
+// "Pepa Novak" present? true
+key = Prefix("Karel")
+fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
+// Anybody called "Karel" here? true
+
+// Walk the tree in alphabetical order.
+trie.Visit(printItem)
+// "Karel Hynek Macha": 4
+// "Karel Macha": 3
+// "Pepa Novak": 1
+// "Pepa Sindelar": 2
+
+// Walk a subtree.
+trie.VisitSubtree(Prefix("Pepa"), printItem)
+// "Pepa Novak": 1
+// "Pepa Sindelar": 2
+
+// Modify an item, then fetch it from the tree.
+trie.Set(Prefix("Karel Hynek Macha"), 10)
+key = Prefix("Karel Hynek Macha")
+fmt.Printf("%q: %v\n", key, trie.Get(key))
+// "Karel Hynek Macha": 10
+
+// Walk prefixes.
+prefix := Prefix("Karel Hynek Macha je kouzelnik")
+trie.VisitPrefixes(prefix, printItem)
+// "Karel Hynek Macha": 10
+
+// Delete some items.
+trie.Delete(Prefix("Pepa Novak"))
+trie.Delete(Prefix("Karel Macha"))
+
+// Walk again.
+trie.Visit(printItem)
+// "Karel Hynek Macha": 10
+// "Pepa Sindelar": 2
+
+// Delete a subtree.
+trie.DeleteSubtree(Prefix("Pepa"))
+
+// Print what is left.
+trie.Visit(printItem)
+// "Karel Hynek Macha": 10
+```
+
+## License ##
+
+MIT, check the `LICENSE` file.
+
+[![Gittip
+Badge](http://img.shields.io/gittip/alanhamlett.png)](https://www.gittip.com/tchap/
+"Gittip Badge")
+
+[![Bitdeli
+Badge](https://d2weczhvl823v0.cloudfront.net/tchap/go-patricia/trend.png)](https://bitdeli.com/free
+"Bitdeli Badge")
diff --git a/vendor/github.com/tchap/go-patricia/patricia/children.go b/vendor/github.com/tchap/go-patricia/patricia/children.go
new file mode 100644
index 000000000..a5677c335
--- /dev/null
+++ b/vendor/github.com/tchap/go-patricia/patricia/children.go
@@ -0,0 +1,325 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+import (
+ "fmt"
+ "io"
+ "sort"
+)
+
+type childList interface {
+ length() int
+ head() *Trie
+ add(child *Trie) childList
+ remove(b byte)
+ replace(b byte, child *Trie)
+ next(b byte) *Trie
+ walk(prefix *Prefix, visitor VisitorFunc) error
+ print(w io.Writer, indent int)
+ total() int
+}
+
+type tries []*Trie
+
+func (t tries) Len() int {
+ return len(t)
+}
+
+func (t tries) Less(i, j int) bool {
+ strings := sort.StringSlice{string(t[i].prefix), string(t[j].prefix)}
+ return strings.Less(0, 1)
+}
+
+func (t tries) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+type sparseChildList struct {
+ children tries
+}
+
+func newSparseChildList(maxChildrenPerSparseNode int) childList {
+ return &sparseChildList{
+ children: make(tries, 0, maxChildrenPerSparseNode),
+ }
+}
+
+func (list *sparseChildList) length() int {
+ return len(list.children)
+}
+
+func (list *sparseChildList) head() *Trie {
+ return list.children[0]
+}
+
+func (list *sparseChildList) add(child *Trie) childList {
+ // Search for an empty spot and insert the child if possible.
+ if len(list.children) != cap(list.children) {
+ list.children = append(list.children, child)
+ return list
+ }
+
+ // Otherwise we have to transform to the dense list type.
+ return newDenseChildList(list, child)
+}
+
+func (list *sparseChildList) remove(b byte) {
+ for i, node := range list.children {
+ if node.prefix[0] == b {
+ list.children[i] = list.children[len(list.children)-1]
+ list.children[len(list.children)-1] = nil
+ list.children = list.children[:len(list.children)-1]
+ return
+ }
+ }
+
+ // This is not supposed to be reached.
+ panic("removing non-existent child")
+}
+
+func (list *sparseChildList) replace(b byte, child *Trie) {
+ // Make a consistency check.
+ if p0 := child.prefix[0]; p0 != b {
+ panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b))
+ }
+
+ // Seek the child and replace it.
+ for i, node := range list.children {
+ if node.prefix[0] == b {
+ list.children[i] = child
+ return
+ }
+ }
+}
+
+func (list *sparseChildList) next(b byte) *Trie {
+ for _, child := range list.children {
+ if child.prefix[0] == b {
+ return child
+ }
+ }
+ return nil
+}
+
+func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {
+
+ sort.Sort(list.children)
+
+ for _, child := range list.children {
+ *prefix = append(*prefix, child.prefix...)
+ if child.item != nil {
+ err := visitor(*prefix, child.item)
+ if err != nil {
+ if err == SkipSubtree {
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ continue
+ }
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ return err
+ }
+ }
+
+ err := child.children.walk(prefix, visitor)
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (list *sparseChildList) total() int {
+ tot := 0
+ for _, child := range list.children {
+ if child != nil {
+ tot = tot + child.total()
+ }
+ }
+ return tot
+}
+
+func (list *sparseChildList) print(w io.Writer, indent int) {
+ for _, child := range list.children {
+ if child != nil {
+ child.print(w, indent)
+ }
+ }
+}
+
+type denseChildList struct {
+ min int
+ max int
+ numChildren int
+ headIndex int
+ children []*Trie
+}
+
+func newDenseChildList(list *sparseChildList, child *Trie) childList {
+ var (
+ min int = 255
+ max int = 0
+ )
+ for _, child := range list.children {
+ b := int(child.prefix[0])
+ if b < min {
+ min = b
+ }
+ if b > max {
+ max = b
+ }
+ }
+
+ b := int(child.prefix[0])
+ if b < min {
+ min = b
+ }
+ if b > max {
+ max = b
+ }
+
+ children := make([]*Trie, max-min+1)
+ for _, child := range list.children {
+ children[int(child.prefix[0])-min] = child
+ }
+ children[int(child.prefix[0])-min] = child
+
+ return &denseChildList{
+ min: min,
+ max: max,
+ numChildren: list.length() + 1,
+ headIndex: 0,
+ children: children,
+ }
+}
+
+func (list *denseChildList) length() int {
+ return list.numChildren
+}
+
+func (list *denseChildList) head() *Trie {
+ return list.children[list.headIndex]
+}
+
+func (list *denseChildList) add(child *Trie) childList {
+ b := int(child.prefix[0])
+ var i int
+
+ switch {
+ case list.min <= b && b <= list.max:
+ if list.children[b-list.min] != nil {
+ panic("dense child list collision detected")
+ }
+ i = b - list.min
+ list.children[i] = child
+
+ case b < list.min:
+ children := make([]*Trie, list.max-b+1)
+ i = 0
+ children[i] = child
+ copy(children[list.min-b:], list.children)
+ list.children = children
+ list.min = b
+
+ default: // b > list.max
+ children := make([]*Trie, b-list.min+1)
+ i = b - list.min
+ children[i] = child
+ copy(children, list.children)
+ list.children = children
+ list.max = b
+ }
+
+ list.numChildren++
+ if i < list.headIndex {
+ list.headIndex = i
+ }
+ return list
+}
+
+func (list *denseChildList) remove(b byte) {
+ i := int(b) - list.min
+ if list.children[i] == nil {
+ // This is not supposed to be reached.
+ panic("removing non-existent child")
+ }
+ list.numChildren--
+ list.children[i] = nil
+
+ // Update head index.
+ if i == list.headIndex {
+ for ; i < len(list.children); i++ {
+ if list.children[i] != nil {
+ list.headIndex = i
+ return
+ }
+ }
+ }
+}
+
+func (list *denseChildList) replace(b byte, child *Trie) {
+ // Make a consistency check.
+ if p0 := child.prefix[0]; p0 != b {
+ panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b))
+ }
+
+ // Replace the child.
+ list.children[int(b)-list.min] = child
+}
+
+func (list *denseChildList) next(b byte) *Trie {
+ i := int(b)
+ if i < list.min || list.max < i {
+ return nil
+ }
+ return list.children[i-list.min]
+}
+
+func (list *denseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {
+ for _, child := range list.children {
+ if child == nil {
+ continue
+ }
+ *prefix = append(*prefix, child.prefix...)
+ if child.item != nil {
+ if err := visitor(*prefix, child.item); err != nil {
+ if err == SkipSubtree {
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ continue
+ }
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ return err
+ }
+ }
+
+ err := child.children.walk(prefix, visitor)
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (list *denseChildList) print(w io.Writer, indent int) {
+ for _, child := range list.children {
+ if child != nil {
+ child.print(w, indent)
+ }
+ }
+}
+
+func (list *denseChildList) total() int {
+ tot := 0
+ for _, child := range list.children {
+ if child != nil {
+ tot = tot + child.total()
+ }
+ }
+ return tot
+}
diff --git a/vendor/github.com/tchap/go-patricia/patricia/patricia.go b/vendor/github.com/tchap/go-patricia/patricia/patricia.go
new file mode 100644
index 000000000..a1fc53d5d
--- /dev/null
+++ b/vendor/github.com/tchap/go-patricia/patricia/patricia.go
@@ -0,0 +1,594 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+//------------------------------------------------------------------------------
+// Trie
+//------------------------------------------------------------------------------
+
+const (
+ DefaultMaxPrefixPerNode = 10
+ DefaultMaxChildrenPerSparseNode = 8
+)
+
+type (
+ Prefix []byte
+ Item interface{}
+ VisitorFunc func(prefix Prefix, item Item) error
+)
+
+// Trie is a generic patricia trie that allows fast retrieval of items by prefix,
+// among other funky stuff.
+//
+// Trie is not thread-safe.
+type Trie struct {
+ prefix Prefix
+ item Item
+
+ maxPrefixPerNode int
+ maxChildrenPerSparseNode int
+
+ children childList
+}
+
+// Public API ------------------------------------------------------------------
+
+type Option func(*Trie)
+
+// NewTrie constructs a new, empty trie using the given options.
+func NewTrie(options ...Option) *Trie {
+ trie := &Trie{}
+
+ for _, opt := range options {
+ opt(trie)
+ }
+
+ if trie.maxPrefixPerNode <= 0 {
+ trie.maxPrefixPerNode = DefaultMaxPrefixPerNode
+ }
+ if trie.maxChildrenPerSparseNode <= 0 {
+ trie.maxChildrenPerSparseNode = DefaultMaxChildrenPerSparseNode
+ }
+
+ trie.children = newSparseChildList(trie.maxChildrenPerSparseNode)
+ return trie
+}
+
+func MaxPrefixPerNode(value int) Option {
+ return func(trie *Trie) {
+ trie.maxPrefixPerNode = value
+ }
+}
+
+func MaxChildrenPerSparseNode(value int) Option {
+ return func(trie *Trie) {
+ trie.maxChildrenPerSparseNode = value
+ }
+}
+
+// Item returns the item stored in the root of this trie.
+func (trie *Trie) Item() Item {
+ return trie.item
+}
+
+// Insert inserts a new item into the trie using the given prefix. Insert does
+// not replace existing items. It returns false if an item was already in place.
+func (trie *Trie) Insert(key Prefix, item Item) (inserted bool) {
+ return trie.put(key, item, false)
+}
+
+// Set works much like Insert, but it always sets the item, possibly replacing
+// the item previously inserted.
+func (trie *Trie) Set(key Prefix, item Item) {
+ trie.put(key, item, true)
+}
+
+// Get returns the item located at key.
+//
+// This method is a bit dangerous, because Get can also end up in an internal
+// node that is not really representing any user-defined value. So when nil is
+// used as a valid value, it is not possible to tell whether the value was
+// inserted into the tree by the user or not. A possible workaround is not to
+// use the nil interface as a valid value; even the zero value of any type is
+// enough to prevent this bad behaviour.
+func (trie *Trie) Get(key Prefix) (item Item) {
+ _, node, found, leftover := trie.findSubtree(key)
+ if !found || len(leftover) != 0 {
+ return nil
+ }
+ return node.item
+}
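+
+// To sidestep the ambiguity described above, callers can store a non-nil
+// zero value instead of nil (illustration only, not part of the library):
+//
+//   t := NewTrie()
+//   t.Insert(Prefix("key"), struct{}{}) // non-nil marker item
+//   _ = t.Match(Prefix("key"))          // true
+//   _ = t.Match(Prefix("missing"))      // false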
+
+// Match returns what Get(prefix) != nil would return. The same warning as for
+// Get applies here as well.
+func (trie *Trie) Match(prefix Prefix) (matchedExactly bool) {
+ return trie.Get(prefix) != nil
+}
+
+// MatchSubtree returns true when there is a subtree representing extensions
+// to key, that is if there are any keys in the tree which have key as prefix.
+func (trie *Trie) MatchSubtree(key Prefix) (matched bool) {
+ _, _, matched, _ = trie.findSubtree(key)
+ return
+}
+
+// Visit calls visitor on every node containing a non-nil item
+// in alphabetical order.
+//
+// If an error is returned from visitor, the function stops visiting the tree
+// and returns that error, unless it is a special error - SkipSubtree. In that
+// case Visit skips the subtree represented by the current node and continues
+// elsewhere.
+func (trie *Trie) Visit(visitor VisitorFunc) error {
+ return trie.walk(nil, visitor)
+}
+
+func (trie *Trie) size() int {
+ n := 0
+
+ trie.walk(nil, func(prefix Prefix, item Item) error {
+ n++
+ return nil
+ })
+
+ return n
+}
+
+func (trie *Trie) total() int {
+ return 1 + trie.children.total()
+}
+
+// VisitSubtree works much like Visit, but it only visits nodes matching prefix.
+func (trie *Trie) VisitSubtree(prefix Prefix, visitor VisitorFunc) error {
+ // Nil prefix not allowed.
+ if prefix == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return nil
+ }
+
+ // Locate the relevant subtree.
+ _, root, found, leftover := trie.findSubtree(prefix)
+ if !found {
+ return nil
+ }
+ prefix = append(prefix, leftover...)
+
+ // Visit it.
+ return root.walk(prefix, visitor)
+}
+
+// VisitPrefixes visits only nodes that represent prefixes of key.
+// To say the obvious, returning SkipSubtree from visitor makes no sense here.
+func (trie *Trie) VisitPrefixes(key Prefix, visitor VisitorFunc) error {
+ // Nil key not allowed.
+ if key == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return nil
+ }
+
+ // Walk the path matching key prefixes.
+ node := trie
+ prefix := key
+ offset := 0
+ for {
+ // Compute what part of prefix matches.
+ common := node.longestCommonPrefixLength(key)
+ key = key[common:]
+ offset += common
+
+ // Partial match means that there is no subtree matching prefix.
+ if common < len(node.prefix) {
+ return nil
+ }
+
+ // Call the visitor.
+ if item := node.item; item != nil {
+ if err := visitor(prefix[:offset], item); err != nil {
+ return err
+ }
+ }
+
+ if len(key) == 0 {
+ // This node represents key, we are finished.
+ return nil
+ }
+
+ // There is some key suffix left, move to the children.
+ child := node.children.next(key[0])
+ if child == nil {
+ // There is nowhere to continue, return.
+ return nil
+ }
+
+ node = child
+ }
+}
+
+// Delete deletes the item represented by the given prefix.
+//
+// True is returned if the matching node was found and deleted.
+func (trie *Trie) Delete(key Prefix) (deleted bool) {
+ // Nil prefix not allowed.
+ if key == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return false
+ }
+
+ // Find the relevant node.
+ path, found, _ := trie.findSubtreePath(key)
+ if !found {
+ return false
+ }
+
+ node := path[len(path)-1]
+ var parent *Trie
+ if len(path) != 1 {
+ parent = path[len(path)-2]
+ }
+
+ // If the item is already set to nil, there is nothing to do.
+ if node.item == nil {
+ return false
+ }
+
+ // Delete the item.
+ node.item = nil
+
+ // Initialise i before goto.
+ // Will be used later in a loop.
+ i := len(path) - 1
+
+ // In case there are some child nodes, we cannot drop the whole subtree.
+ // We can try to compact nodes, though.
+ if node.children.length() != 0 {
+ goto Compact
+ }
+
+ // In case we are at the root, just reset it and we are done.
+ if parent == nil {
+ node.reset()
+ return true
+ }
+
+ // We can drop a subtree.
+	// Find the first ancestor that has its item set or has two or more child nodes.
+	// That is the node at which to drop the subtree.
+ for ; i >= 0; i-- {
+ if current := path[i]; current.item != nil || current.children.length() >= 2 {
+ break
+ }
+ }
+
+ // Handle the case when there is no such node.
+ // In other words, we can reset the whole tree.
+ if i == -1 {
+ path[0].reset()
+ return true
+ }
+
+ // We can just remove the subtree here.
+ node = path[i]
+ if i == 0 {
+ parent = nil
+ } else {
+ parent = path[i-1]
+ }
+	// i+1 is always a valid index since i never points to the last node.
+	// The loop above skips at least the last node, whose item we know is nil
+	// and which has no children; otherwise we would be compacting instead.
+ node.children.remove(path[i+1].prefix[0])
+
+Compact:
+ // The node is set to the first non-empty ancestor,
+ // so try to compact since that might be possible now.
+ if compacted := node.compact(); compacted != node {
+ if parent == nil {
+ *node = *compacted
+ } else {
+ parent.children.replace(node.prefix[0], compacted)
+ *parent = *parent.compact()
+ }
+ }
+
+ return true
+}
+
+// DeleteSubtree finds the subtree exactly matching prefix and deletes it.
+//
+// True is returned if the subtree was found and deleted.
+func (trie *Trie) DeleteSubtree(prefix Prefix) (deleted bool) {
+ // Nil prefix not allowed.
+ if prefix == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return false
+ }
+
+ // Locate the relevant subtree.
+ parent, root, found, _ := trie.findSubtree(prefix)
+ if !found {
+ return false
+ }
+
+ // If we are in the root of the trie, reset the trie.
+ if parent == nil {
+ root.reset()
+ return true
+ }
+
+ // Otherwise remove the root node from its parent.
+ parent.children.remove(root.prefix[0])
+ return true
+}
+
+// Internal helper methods -----------------------------------------------------
+
+func (trie *Trie) empty() bool {
+ return trie.item == nil && trie.children.length() == 0
+}
+
+func (trie *Trie) reset() {
+ trie.prefix = nil
+ trie.children = newSparseChildList(trie.maxPrefixPerNode)
+}
+
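+// put inserts item under key, creating, splitting, and appending nodes as
+// needed. The labels mark the three outcomes: SplitPrefix splits an existing
+// node when only part of its prefix matches, AppendChild appends new nodes
+// for any remaining key (in chunks of at most maxPrefixPerNode bytes), and
+// InsertItem stores the item, honouring the replace flag.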
+func (trie *Trie) put(key Prefix, item Item, replace bool) (inserted bool) {
+ // Nil prefix not allowed.
+ if key == nil {
+ panic(ErrNilPrefix)
+ }
+
+ var (
+ common int
+ node *Trie = trie
+ child *Trie
+ )
+
+ if node.prefix == nil {
+ if len(key) <= trie.maxPrefixPerNode {
+ node.prefix = key
+ goto InsertItem
+ }
+ node.prefix = key[:trie.maxPrefixPerNode]
+ key = key[trie.maxPrefixPerNode:]
+ goto AppendChild
+ }
+
+ for {
+ // Compute the longest common prefix length.
+ common = node.longestCommonPrefixLength(key)
+ key = key[common:]
+
+ // Only a part matches, split.
+ if common < len(node.prefix) {
+ goto SplitPrefix
+ }
+
+		// Here common == len(node.prefix), since common > len(node.prefix)
+		// is impossible. So if the key is now exhausted (len(key) == 0), the
+		// original key equals node.prefix exactly and the item belongs here.
+ if len(key) == 0 {
+ goto InsertItem
+ }
+
+ // Check children for matching prefix.
+ child = node.children.next(key[0])
+ if child == nil {
+ goto AppendChild
+ }
+ node = child
+ }
+
+SplitPrefix:
+ // Split the prefix if necessary.
+ child = new(Trie)
+ *child = *node
+ *node = *NewTrie()
+ node.prefix = child.prefix[:common]
+ child.prefix = child.prefix[common:]
+ child = child.compact()
+ node.children = node.children.add(child)
+
+AppendChild:
+	// Keep appending children until the whole key has been inserted.
+	// This loop starts with an empty node.prefix that needs to be filled.
+ for len(key) != 0 {
+ child := NewTrie()
+ if len(key) <= trie.maxPrefixPerNode {
+ child.prefix = key
+ node.children = node.children.add(child)
+ node = child
+ goto InsertItem
+ } else {
+ child.prefix = key[:trie.maxPrefixPerNode]
+ key = key[trie.maxPrefixPerNode:]
+ node.children = node.children.add(child)
+ node = child
+ }
+ }
+
+InsertItem:
+ // Try to insert the item if possible.
+ if replace || node.item == nil {
+ node.item = item
+ return true
+ }
+ return false
+}
+
+func (trie *Trie) compact() *Trie {
+ // Only a node with a single child can be compacted.
+ if trie.children.length() != 1 {
+ return trie
+ }
+
+ child := trie.children.head()
+
+ // If any item is set, we cannot compact since we want to retain
+ // the ability to do searching by key. This makes compaction less usable,
+ // but that simply cannot be avoided.
+ if trie.item != nil || child.item != nil {
+ return trie
+ }
+
+ // Make sure the combined prefixes fit into a single node.
+ if len(trie.prefix)+len(child.prefix) > trie.maxPrefixPerNode {
+ return trie
+ }
+
+ // Concatenate the prefixes, move the items.
+ child.prefix = append(trie.prefix, child.prefix...)
+ if trie.item != nil {
+ child.item = trie.item
+ }
+
+ return child
+}
+
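+// findSubtree descends from trie along prefix and returns the deepest node
+// reached (root) together with its parent, whether the whole prefix was
+// consumed (found), and the unmatched remainder of that node's own prefix
+// (leftover).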
+func (trie *Trie) findSubtree(prefix Prefix) (parent *Trie, root *Trie, found bool, leftover Prefix) {
+ // Find the subtree matching prefix.
+ root = trie
+ for {
+ // Compute what part of prefix matches.
+ common := root.longestCommonPrefixLength(prefix)
+ prefix = prefix[common:]
+
+ // We used up the whole prefix, subtree found.
+ if len(prefix) == 0 {
+ found = true
+ leftover = root.prefix[common:]
+ return
+ }
+
+ // Partial match means that there is no subtree matching prefix.
+ if common < len(root.prefix) {
+ leftover = root.prefix[common:]
+ return
+ }
+
+ // There is some prefix left, move to the children.
+ child := root.children.next(prefix[0])
+ if child == nil {
+ // There is nowhere to continue, there is no subtree matching prefix.
+ return
+ }
+
+ parent = root
+ root = child
+ }
+}
+
+func (trie *Trie) findSubtreePath(prefix Prefix) (path []*Trie, found bool, leftover Prefix) {
+ // Find the subtree matching prefix.
+ root := trie
+ var subtreePath []*Trie
+ for {
+ // Append the current root to the path.
+ subtreePath = append(subtreePath, root)
+
+ // Compute what part of prefix matches.
+ common := root.longestCommonPrefixLength(prefix)
+ prefix = prefix[common:]
+
+ // We used up the whole prefix, subtree found.
+ if len(prefix) == 0 {
+ path = subtreePath
+ found = true
+ leftover = root.prefix[common:]
+ return
+ }
+
+ // Partial match means that there is no subtree matching prefix.
+ if common < len(root.prefix) {
+ leftover = root.prefix[common:]
+ return
+ }
+
+ // There is some prefix left, move to the children.
+ child := root.children.next(prefix[0])
+ if child == nil {
+ // There is nowhere to continue, there is no subtree matching prefix.
+ return
+ }
+
+ root = child
+ }
+}
+
+func (trie *Trie) walk(actualRootPrefix Prefix, visitor VisitorFunc) error {
+ var prefix Prefix
+ // Allocate a bit more space for prefix at the beginning.
+ if actualRootPrefix == nil {
+ prefix = make(Prefix, 32+len(trie.prefix))
+ copy(prefix, trie.prefix)
+ prefix = prefix[:len(trie.prefix)]
+ } else {
+ prefix = make(Prefix, 32+len(actualRootPrefix))
+ copy(prefix, actualRootPrefix)
+ prefix = prefix[:len(actualRootPrefix)]
+ }
+
+	// Visit the root first. Note that this works for an empty trie as well,
+	// since in that case item == nil && len(children) == 0.
+ if trie.item != nil {
+ if err := visitor(prefix, trie.item); err != nil {
+ if err == SkipSubtree {
+ return nil
+ }
+ return err
+ }
+ }
+
+ // Then continue to the children.
+ return trie.children.walk(&prefix, visitor)
+}
+
+func (trie *Trie) longestCommonPrefixLength(prefix Prefix) (i int) {
+ for ; i < len(prefix) && i < len(trie.prefix) && prefix[i] == trie.prefix[i]; i++ {
+ }
+ return
+}
+
+func (trie *Trie) dump() string {
+ writer := &bytes.Buffer{}
+ trie.print(writer, 0)
+ return writer.String()
+}
+
+func (trie *Trie) print(writer io.Writer, indent int) {
+ fmt.Fprintf(writer, "%s%s %v\n", strings.Repeat(" ", indent), string(trie.prefix), trie.item)
+ trie.children.print(writer, indent+2)
+}
+
+// Errors ----------------------------------------------------------------------
+
+var (
+ SkipSubtree = errors.New("Skip this subtree")
+ ErrNilPrefix = errors.New("Nil prefix passed into a method call")
+)
diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE
new file mode 100644
index 000000000..95a0f0541
--- /dev/null
+++ b/vendor/github.com/ugorji/go/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2015 Ugorji Nwoke.
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/ugorji/go/README.md b/vendor/github.com/ugorji/go/README.md
new file mode 100644
index 000000000..6bb74742f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/README.md
@@ -0,0 +1,20 @@
+# go/codec
+
+This repository contains the `go-codec` library,
+a high-performance, feature-rich, idiomatic encode/decode and RPC library for
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+
+For more information:
+
+ - [see the codec/Readme for quick usage information](https://github.com/ugorji/go/tree/master/codec#readme)
+ - [view the API on godoc](http://godoc.org/github.com/ugorji/go/codec)
+ - [read the detailed usage/how-to primer](http://ugorji.net/blog/go-codec-primer)
+
+Install using:
+
+ go get github.com/ugorji/go/codec
+
diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go
new file mode 100644
index 000000000..209f9ebad
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/0doc.go
@@ -0,0 +1,199 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package understands the 'unsafe' tag, to allow using unsafe semantics:
+
+ - When decoding into a struct, you need to read the field name as a string
+ so you can find the struct field it is mapped to.
+ Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion.
+
+To install using unsafe, pass the 'unsafe' tag:
+
+ go get -tags=unsafe github.com/ugorji/go/codec
+
+For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (i.e. json, xml, gob, etc.).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Multiple conversions:
+ Package coerces types where appropriate
+ e.g. decode an int in the stream into a float, etc.
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides an RPC Server and Client Codec for the net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+    type BitSet   []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
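+
+As a minimal sketch (illustrative only: the uuidExt type, its methods, and the
+tag value 78 are assumptions, not part of this package), the UUID type above
+could be registered on a BincHandle via SetBytesExt:
+
+	// uuidExt implements BytesExt: WriteExt serializes, ReadExt deserializes.
+	type uuidExt struct{}
+
+	func (uuidExt) WriteExt(v interface{}) []byte {
+		switch u := v.(type) {
+		case UUID:
+			return []byte(u)
+		case *UUID:
+			return []byte(*u)
+		}
+		return nil
+	}
+
+	func (uuidExt) ReadExt(dst interface{}, src []byte) {
+		*(dst.(*UUID)) = UUID(src)
+	}
+
+	var bh codec.BincHandle
+	bh.SetBytesExt(reflect.TypeOf(UUID("")), 78, uuidExt{})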
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+    - However, each Encoder or Decoder MUST NOT be used concurrently.
+    - To re-use an Encoder/Decoder, call Reset(...) on it first.
+      This allows you to reuse state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+*/
+package codec
+
+// Benefits of go-codec:
+//
+// - encoding/json always reads whole file into memory first.
+// This makes it unsuitable for parsing very large files.
+// - encoding/xml cannot parse into a map[string]interface{}
+// I found this out on reading https://github.com/clbanning/mxj
+
+// TODO:
+//
+// - optimization for codecgen:
+// if len of entity is <= 3 words, then support a value receiver for encode.
+// - (En|De)coder should store an error when it occurs.
+//   Until reset, subsequent calls return the stored error.
+// This means that free panics must go away.
+// All errors must be raised through errorf method.
+// - Decoding using a chan is good, but incurs concurrency costs.
+// This is because there's no fast way to use a channel without it
+// having to switch goroutines constantly.
+// Callback pattern is still the best. Maybe consider supporting something like:
+// type X struct {
+// Name string
+// Ys []Y
+// Ys chan <- Y
+// Ys func(Y) -> call this function for each entry
+// }
+// - Consider adding a isZeroer interface { isZero() bool }
+// It is used within isEmpty, for omitEmpty support.
+// - Consider making Handle used AS-IS within the encoding/decoding session.
+// This means that we don't cache Handle information within the (En|De)coder,
+// except we really need it at Reset(...)
+// - Consider adding math/big support
+// - Consider reducing the size of the generated functions:
+// Maybe use one loop, and put the conditionals in the loop.
+// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } }
diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md
new file mode 100644
index 000000000..91cb3a27b
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/README.md
@@ -0,0 +1,148 @@
+# Codec
+
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package understands the `unsafe` tag, to allow using unsafe semantics:
+
+ - When decoding into a struct, you need to read the field name as a string
+ so you can find the struct field it is mapped to.
+ Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion.
+
+To use it, you must pass the `unsafe` tag during install:
+
+```
+go install -tags=unsafe github.com/ugorji/go/codec
+```
+
+ - Online documentation: http://godoc.org/github.com/ugorji/go/codec
+ - Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (i.e. json, xml, gob, etc.).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Multiple conversions:
+ Package coerces types where appropriate
+ e.g. decode an int in the stream into a float, etc.
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides an RPC Server and Client Codec for the net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+    type BitSet   []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
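+
+As a hedged sketch (the epochExt type below is an assumption, not part of this
+package), a time value could be registered on a CborHandle via
+SetInterfaceExt, whose ConvertExt/UpdateExt pair converts to and from a
+codec-friendly representation:
+
+    type epochExt struct{}
+
+    func (epochExt) ConvertExt(v interface{}) interface{} {
+        // v may arrive as a value or a pointer
+        switch t := v.(type) {
+        case time.Time:
+            return t.Unix()
+        case *time.Time:
+            return t.Unix()
+        }
+        return nil
+    }
+
+    func (epochExt) UpdateExt(dst interface{}, src interface{}) {
+        // decoded integers may arrive as int64 or uint64, per handle options
+        var sec int64
+        switch v := src.(type) {
+        case int64:
+            sec = v
+        case uint64:
+            sec = int64(v)
+        }
+        *(dst.(*time.Time)) = time.Unix(sec, 0)
+    }
+
+    var ch codec.CborHandle
+    ch.SetInterfaceExt(reflect.TypeOf(time.Time{}), 1, epochExt{})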
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+## Usage
+
+Typical usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go
new file mode 100644
index 000000000..681b2b0da
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/binc.go
@@ -0,0 +1,929 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+const bincDoPrune = true // No longer needed: it dates from when the C library did not support pruning.
+
+// vd as low 4 bits (there are 16 slots)
+const (
+ bincVdSpecial byte = iota
+ bincVdPosInt
+ bincVdNegInt
+ bincVdFloat
+
+ bincVdString
+ bincVdByteArray
+ bincVdArray
+ bincVdMap
+
+ bincVdTimestamp
+ bincVdSmallInt
+ bincVdUnicodeOther
+ bincVdSymbol
+
+ bincVdDecimal
+ _ // open slot
+ _ // open slot
+ bincVdCustomExt = 0x0f
+)
+
+const (
+ bincSpNil byte = iota
+ bincSpFalse
+ bincSpTrue
+ bincSpNan
+ bincSpPosInf
+ bincSpNegInf
+ bincSpZeroFloat
+ bincSpZero
+ bincSpNegOne
+)
+
+const (
+ bincFlBin16 byte = iota
+ bincFlBin32
+ _ // bincFlBin32e
+ bincFlBin64
+ _ // bincFlBin64e
+ // others not currently supported
+)
+
+type bincEncDriver struct {
+ e *Encoder
+ w encWriter
+ m map[string]uint16 // symbols
+ b [scratchByteArrayLen]byte
+ s uint16 // symbols sequencer
+ encNoSeparator
+}
+
+func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
+ if rt == timeTypId {
+ var bs []byte
+ switch x := v.(type) {
+ case time.Time:
+ bs = encodeTime(x)
+ case *time.Time:
+ bs = encodeTime(*x)
+ default:
+ e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
+ }
+ e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
+ e.w.writeb(bs)
+ }
+}
+
+func (e *bincEncDriver) EncodeNil() {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNil)
+}
+
+func (e *bincEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
+ } else {
+ e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
+ }
+}
+
+func (e *bincEncDriver) EncodeFloat32(f float32) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
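+// EncodeFloat64 optionally prunes trailing zero bytes of the big-endian
+// IEEE-754 encoding: with bincDoPrune, the 0x8 flag in the descriptor marks
+// a pruned value and the following byte carries the number of bytes kept.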
+func (e *bincEncDriver) EncodeFloat64(f float64) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ bigen.PutUint64(e.b[:8], math.Float64bits(f))
+ if bincDoPrune {
+ i := 7
+ for ; i >= 0 && (e.b[i] == 0); i-- {
+ }
+ i++
+ if i <= 6 {
+ e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
+ e.w.writen1(byte(i))
+ e.w.writeb(e.b[:i])
+ return
+ }
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin64)
+ e.w.writeb(e.b[:8])
+}
+
+func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
+ if lim == 4 {
+ bigen.PutUint32(e.b[:lim], uint32(v))
+ } else {
+ bigen.PutUint64(e.b[:lim], v)
+ }
+ if bincDoPrune {
+ i := pruneSignExt(e.b[:lim], pos)
+ e.w.writen1(bd | lim - 1 - byte(i))
+ e.w.writeb(e.b[i:lim])
+ } else {
+ e.w.writen1(bd | lim - 1)
+ e.w.writeb(e.b[:lim])
+ }
+}
+
+func (e *bincEncDriver) EncodeInt(v int64) {
+ if v >= 0 {
+ e.encUint(bincVdPosInt<<4, true, uint64(v))
+ } else if v == -1 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
+ } else {
+ e.encUint(bincVdNegInt<<4, false, uint64(-v))
+ }
+}
+
+func (e *bincEncDriver) EncodeUint(v uint64) {
+ e.encUint(bincVdPosInt<<4, true, v)
+}
+
+func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
+ if v == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZero)
+ } else if pos && v >= 1 && v <= 16 {
+ e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd|0x0, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.encIntegerPrune(bd, pos, v, 4)
+ } else {
+ e.encIntegerPrune(bd, pos, v, 8)
+ }
+}
+
+func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(bincVdCustomExt<<4, uint64(length))
+ e.w.writen1(xtag)
+}
+
+func (e *bincEncDriver) EncodeArrayStart(length int) {
+ e.encLen(bincVdArray<<4, uint64(length))
+}
+
+func (e *bincEncDriver) EncodeMapStart(length int) {
+ e.encLen(bincVdMap<<4, uint64(length))
+}
+
+func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeSymbol(v string) {
+ // if WriteSymbolsNoRefs {
+ // e.encodeString(c_UTF8, v)
+ // return
+ // }
+
+	// Symbols only offer a benefit when the string length is > 1, because a
+	// length-1 string takes only 2 bytes to store: the descriptor byte with
+	// embedded length, plus a single byte for the string value.
+
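+	// Descriptor layout used below: in the low nibble of the leading byte,
+	// 0x8 means the symbol id occupies 2 bytes (else 1), 0x4 means the
+	// string value follows inline (first occurrence only), and the low 2
+	// bits give the string-length precision: 0/1/2/3 -> 1/2/4/8 bytes.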
+ l := len(v)
+ if l == 0 {
+ e.encBytesLen(c_UTF8, 0)
+ return
+ } else if l == 1 {
+ e.encBytesLen(c_UTF8, 1)
+ e.w.writen1(v[0])
+ return
+ }
+ if e.m == nil {
+ e.m = make(map[string]uint16, 16)
+ }
+ ui, ok := e.m[v]
+ if ok {
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ } else {
+ e.s++
+ ui = e.s
+ //ui = uint16(atomic.AddUint32(&e.s, 1))
+ e.m[v] = ui
+ var lenprec uint8
+ if l <= math.MaxUint8 {
+ // lenprec = 0
+ } else if l <= math.MaxUint16 {
+ lenprec = 1
+ } else if int64(l) <= math.MaxUint32 {
+ lenprec = 2
+ } else {
+ lenprec = 3
+ }
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ if lenprec == 0 {
+ e.w.writen1(byte(l))
+ } else if lenprec == 1 {
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
+ } else if lenprec == 2 {
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
+ } else {
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
+ }
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writeb(v)
+ }
+}
+
+func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
+ //TODO: support bincUnicodeOther (for now, just use string or bytearray)
+ if c == c_RAW {
+ e.encLen(bincVdByteArray<<4, length)
+ } else {
+ e.encLen(bincVdString<<4, length)
+ }
+}
+
+func (e *bincEncDriver) encLen(bd byte, l uint64) {
+ if l < 12 {
+ e.w.writen1(bd | uint8(l+4))
+ } else {
+ e.encLenNumber(bd, l)
+ }
+}
+
+func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd | 0x02)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else {
+ e.w.writen1(bd | 0x03)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
+ }
+}
+
+//------------------------------------
+
+type bincDecSymbol struct {
+ s string
+ b []byte
+ i uint16
+}
+
+type bincDecDriver struct {
+ d *Decoder
+ h *BincHandle
+ r decReader
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ vd byte
+ vs byte
+ noStreamingCodec
+ decNoSeparator
+ b [scratchByteArrayLen]byte
+
+ // linear searching on this slice is ok,
+ // because we typically expect < 32 symbols in each stream.
+ s []bincDecSymbol
+}
+
+func (d *bincDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.vd = d.bd >> 4
+ d.vs = d.bd & 0x0f
+ d.bdRead = true
+}
+
+func (d *bincDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *bincDecDriver) ContainerType() (vt valueType) {
+ if d.vd == bincVdSpecial && d.vs == bincSpNil {
+ return valueTypeNil
+ } else if d.vd == bincVdByteArray {
+ return valueTypeBytes
+ } else if d.vd == bincVdString {
+ return valueTypeString
+ } else if d.vd == bincVdArray {
+ return valueTypeArray
+ } else if d.vd == bincVdMap {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *bincDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if rt == timeTypId {
+ if d.vd != bincVdTimestamp {
+ d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
+ return
+ }
+ tt, err := decodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ var vt *time.Time = v.(*time.Time)
+ *vt = tt
+ d.bdRead = false
+ }
+}
+
+func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
+ if vs&0x8 == 0 {
+ d.r.readb(d.b[0:defaultLen])
+ } else {
+ l := d.r.readn1()
+ if l > 8 {
+ d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
+ return
+ }
+ for i := l; i < 8; i++ {
+ d.b[i] = 0
+ }
+ d.r.readb(d.b[0:l])
+ }
+}
+
+func (d *bincDecDriver) decFloat() (f float64) {
+ //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
+ if x := d.vs & 0x7; x == bincFlBin32 {
+ d.decFloatPre(d.vs, 4)
+ f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
+ } else if x == bincFlBin64 {
+ d.decFloatPre(d.vs, 8)
+ f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
+ } else {
+ d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decUint() (v uint64) {
+ // need to inline the code (interface conversion and type assertion expensive)
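+	// d.vs selects the width: the integer occupies vs+1 big-endian bytes
+	// (1..8), zero-padded on the left into d.b before conversion.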
+ switch d.vs {
+ case 0:
+ v = uint64(d.r.readn1())
+ case 1:
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ case 2:
+ d.b[4] = 0
+ d.r.readb(d.b[5:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 3:
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 4, 5, 6:
+ lim := int(7 - d.vs)
+ d.r.readb(d.b[lim:8])
+ for i := 0; i < lim; i++ {
+ d.b[i] = 0
+ }
+ v = uint64(bigen.Uint64(d.b[:8]))
+ case 7:
+ d.r.readb(d.b[:8])
+ v = uint64(bigen.Uint64(d.b[:8]))
+ default:
+ d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdPosInt {
+ ui = d.decUint()
+ } else if vd == bincVdNegInt {
+ ui = d.decUint()
+ neg = true
+ } else if vd == bincVdSmallInt {
+ ui = uint64(d.vs) + 1
+ } else if vd == bincVdSpecial {
+ if vs == bincSpZero {
+ //i = 0
+ } else if vs == bincSpNegOne {
+ neg = true
+ ui = 1
+ } else {
+ d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
+ return
+ }
+ } else {
+ d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ ui, neg := d.decCheckInteger()
+ i, overflow := chkOvf.SignedInt(ui)
+ if overflow {
+ d.d.errorf("simple: overflow converting %v to signed integer", ui)
+ return
+ }
+ if neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("binc: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("binc: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdSpecial {
+ d.bdRead = false
+ if vs == bincSpNan {
+ return math.NaN()
+ } else if vs == bincSpPosInf {
+ return math.Inf(1)
+ } else if vs == bincSpZeroFloat || vs == bincSpZero {
+ return
+ } else if vs == bincSpNegInf {
+ return math.Inf(-1)
+ } else {
+ d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
+ return
+ }
+ } else if vd == bincVdFloat {
+ f = d.decFloat()
+ } else {
+ f = float64(d.DecodeInt(64))
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("binc: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *bincDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+	if bd := d.bd; bd == (bincVdSpecial<<4 | bincSpFalse) {
+		// b = false
+	} else if bd == (bincVdSpecial<<4 | bincSpTrue) {
+		b = true
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadMapStart() (length int) {
+ if d.vd != bincVdMap {
+ d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadArrayStart() (length int) {
+ if d.vd != bincVdArray {
+ d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decLen() int {
+ if d.vs > 3 {
+ return int(d.vs - 4)
+ }
+ return int(d.decLenNumber())
+}
+
+func (d *bincDecDriver) decLenNumber() (v uint64) {
+ if x := d.vs; x == 0 {
+ v = uint64(d.r.readn1())
+ } else if x == 1 {
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ } else if x == 2 {
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ } else {
+ d.r.readb(d.b[:8])
+ v = bigen.Uint64(d.b[:8])
+ }
+ return
+}
+
+func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return
+ }
+ var slen int = -1
+ // var ok bool
+ switch d.vd {
+ case bincVdString, bincVdByteArray:
+ slen = d.decLen()
+ if zerocopy {
+ if d.br {
+ bs2 = d.r.readx(slen)
+ } else if len(bs) == 0 {
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
+ } else {
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
+ }
+ } else {
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
+ }
+ if withString {
+ s = string(bs2)
+ }
+ case bincVdSymbol:
+ // zerocopy doesn't apply for symbols,
+ // as the values must be stored in a table for later use.
+ //
+	// From vs, extract numSymbolBytes, containsStringVal and strLenPrecision,
+	// then extract the symbol id. If containsStringVal, read the string and
+	// put it in the symbol table; otherwise look the string value up there.
+ var symbol uint16
+ vs := d.vs
+ if vs&0x8 == 0 {
+ symbol = uint16(d.r.readn1())
+ } else {
+ symbol = uint16(bigen.Uint16(d.r.readx(2)))
+ }
+ if d.s == nil {
+ d.s = make([]bincDecSymbol, 0, 16)
+ }
+
+ if vs&0x4 == 0 {
+ for i := range d.s {
+ j := &d.s[i]
+ if j.i == symbol {
+ bs2 = j.b
+ if withString {
+ if j.s == "" && bs2 != nil {
+ j.s = string(bs2)
+ }
+ s = j.s
+ }
+ break
+ }
+ }
+ } else {
+ switch vs & 0x3 {
+ case 0:
+ slen = int(d.r.readn1())
+ case 1:
+ slen = int(bigen.Uint16(d.r.readx(2)))
+ case 2:
+ slen = int(bigen.Uint32(d.r.readx(4)))
+ case 3:
+ slen = int(bigen.Uint64(d.r.readx(8)))
+ }
+ // since using symbols, do not store any part of
+ // the parameter bs in the map, as it might be a shared buffer.
+ // bs2 = decByteSlice(d.r, slen, bs)
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
+ if withString {
+ s = string(bs2)
+ }
+ d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
+ }
+ default:
+ d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeString() (s string) {
+ // DecodeBytes does not accommodate symbols, whose impl stores string version in map.
+ // Use decStringAndBytes directly.
+ // return string(d.DecodeBytes(d.b[:], true, true))
+ _, s = d.decStringAndBytes(d.b[:], true, true)
+ return
+}
+
+func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if isstring {
+ bsOut, _ = d.decStringAndBytes(bs, false, zerocopy)
+ return
+ }
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return nil
+ }
+ var clen int
+ if d.vd == bincVdString || d.vd == bincVdByteArray {
+ clen = d.decLen()
+ } else {
+ d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, d.vd)
+ return
+ }
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.vd == bincVdCustomExt {
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ } else if d.vd == bincVdByteArray {
+ xbs = d.DecodeBytes(nil, false, true)
+ } else {
+ d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.vd {
+ case bincVdSpecial:
+ switch d.vs {
+ case bincSpNil:
+ n.v = valueTypeNil
+ case bincSpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case bincSpTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case bincSpNan:
+ n.v = valueTypeFloat
+ n.f = math.NaN()
+ case bincSpPosInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(1)
+ case bincSpNegInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(-1)
+ case bincSpZeroFloat:
+ n.v = valueTypeFloat
+ n.f = float64(0)
+ case bincSpZero:
+ n.v = valueTypeUint
+ n.u = uint64(0) // int8(0)
+ case bincSpNegOne:
+ n.v = valueTypeInt
+ n.i = int64(-1) // int8(-1)
+ default:
+ d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
+ }
+ case bincVdSmallInt:
+ n.v = valueTypeUint
+ n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+ case bincVdPosInt:
+ n.v = valueTypeUint
+ n.u = d.decUint()
+ case bincVdNegInt:
+ n.v = valueTypeInt
+ n.i = -(int64(d.decUint()))
+ case bincVdFloat:
+ n.v = valueTypeFloat
+ n.f = d.decFloat()
+ case bincVdSymbol:
+ n.v = valueTypeSymbol
+ n.s = d.DecodeString()
+ case bincVdString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case bincVdByteArray:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case bincVdTimestamp:
+ n.v = valueTypeTimestamp
+ tt, err := decodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ n.t = tt
+ case bincVdCustomExt:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case bincVdArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bincVdMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+//------------------------------------
+
+// BincHandle is a Handle for the Binc Schema-Free Encoding Format
+// defined at https://github.com/ugorji/binc .
+//
+// BincHandle currently supports all Binc features with the following EXCEPTIONS:
+//  - only integers up to 64 bits of precision are supported.
+//    big integers are unsupported.
+//  - only IEEE 754 binary32 and binary64 floats are supported (i.e. Go float32 and float64 types).
+//    extended precision and decimal IEEE 754 floats are unsupported.
+//  - only UTF-8 strings are supported.
+//    Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//
+// Note that these EXCEPTIONS are temporary; full support is possible and may come soon.
+type BincHandle struct {
+ BasicHandle
+ binaryEncodingType
+}
+
+func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
+ return &bincEncDriver{e: e, w: e.w}
+}
+
+func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
+ return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *bincEncDriver) reset() {
+ e.w = e.e.w
+ e.s = 0
+ e.m = nil
+}
+
+func (d *bincDecDriver) reset() {
+ d.r = d.d.r
+ d.s = nil
+ d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
+}
+
+var _ decDriver = (*bincDecDriver)(nil)
+var _ encDriver = (*bincEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go
new file mode 100644
index 000000000..c87e3f4d4
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/cbor.go
@@ -0,0 +1,592 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+)
+
+const (
+ cborMajorUint byte = iota
+ cborMajorNegInt
+ cborMajorBytes
+ cborMajorText
+ cborMajorArray
+ cborMajorMap
+ cborMajorTag
+ cborMajorOther
+)
+
+const (
+ cborBdFalse byte = 0xf4 + iota
+ cborBdTrue
+ cborBdNil
+ cborBdUndefined
+ cborBdExt
+ cborBdFloat16
+ cborBdFloat32
+ cborBdFloat64
+)
+
+const (
+ cborBdIndefiniteBytes byte = 0x5f
+ cborBdIndefiniteString = 0x7f
+ cborBdIndefiniteArray = 0x9f
+ cborBdIndefiniteMap = 0xbf
+ cborBdBreak = 0xff
+)
+
+const (
+ CborStreamBytes byte = 0x5f
+ CborStreamString = 0x7f
+ CborStreamArray = 0x9f
+ CborStreamMap = 0xbf
+ CborStreamBreak = 0xff
+)
+
+const (
+ cborBaseUint byte = 0x00
+ cborBaseNegInt = 0x20
+ cborBaseBytes = 0x40
+ cborBaseString = 0x60
+ cborBaseArray = 0x80
+ cborBaseMap = 0xa0
+ cborBaseTag = 0xc0
+ cborBaseSimple = 0xe0
+)
+
+// -------------------
+
+type cborEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ w encWriter
+ h *CborHandle
+ x [8]byte
+}
+
+func (e *cborEncDriver) EncodeNil() {
+ e.w.writen1(cborBdNil)
+}
+
+func (e *cborEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(cborBdTrue)
+ } else {
+ e.w.writen1(cborBdFalse)
+ }
+}
+
+func (e *cborEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(cborBdFloat32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *cborEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(cborBdFloat64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
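+// encUint writes v using CBOR's "additional information" scheme: values up
+// to 23 (0x17) are embedded in the initial byte; larger values follow a
+// width marker (0x18/0x19/0x1a/0x1b) as 1, 2, 4, or 8 big-endian bytes.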
+func (e *cborEncDriver) encUint(v uint64, bd byte) {
+ if v <= 0x17 {
+ e.w.writen1(byte(v) + bd)
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd+0x18, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 0x19)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 0x1a)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 0x1b)
+ bigenHelper{e.x[:8], e.w}.writeUint64(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-1-v), cborBaseNegInt)
+ } else {
+ e.encUint(uint64(v), cborBaseUint)
+ }
+}
+
+func (e *cborEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, cborBaseUint)
+}
+
+func (e *cborEncDriver) encLen(bd byte, length int) {
+ e.encUint(uint64(length), bd)
+}
+
+func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ e.encUint(uint64(xtag), cborBaseTag)
+ if v := ext.ConvertExt(rv); v == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ e.encUint(uint64(re.Tag), cborBaseTag)
+ if re.Data != nil {
+ en.encode(re.Data)
+ } else if re.Value == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(re.Value)
+ }
+}
+
+func (e *cborEncDriver) EncodeArrayStart(length int) {
+ e.encLen(cborBaseArray, length)
+}
+
+func (e *cborEncDriver) EncodeMapStart(length int) {
+ e.encLen(cborBaseMap, length)
+}
+
+func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
+ e.encLen(cborBaseString, len(v))
+ e.w.writestr(v)
+}
+
+func (e *cborEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ if c == c_RAW {
+ e.encLen(cborBaseBytes, len(v))
+ } else {
+ e.encLen(cborBaseString, len(v))
+ }
+ e.w.writeb(v)
+}
+
+// ----------------------
+
+type cborDecDriver struct {
+ d *Decoder
+ h *CborHandle
+ r decReader
+ b [scratchByteArrayLen]byte
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ noBuiltInTypes
+ decNoSeparator
+}
+
+func (d *cborDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *cborDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *cborDecDriver) ContainerType() (vt valueType) {
+ if d.bd == cborBdNil {
+ return valueTypeNil
+ } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
+ return valueTypeBytes
+ } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
+ return valueTypeString
+ } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+ return valueTypeArray
+ } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *cborDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ // treat Nil and Undefined as nil values
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) CheckBreak() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdBreak {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) decUint() (ui uint64) {
+ v := d.bd & 0x1f
+ if v <= 0x17 {
+ ui = uint64(v)
+ } else {
+ if v == 0x18 {
+ ui = uint64(d.r.readn1())
+ } else if v == 0x19 {
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ } else if v == 0x1a {
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ } else if v == 0x1b {
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ } else {
+ d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
+ return
+ }
+ }
+ return
+}
+
+func (d *cborDecDriver) decCheckInteger() (neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ major := d.bd >> 5
+ if major == cborMajorUint {
+ } else if major == cborMajorNegInt {
+ neg = true
+ } else {
+ d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
+ return
+ }
+ return
+}
+
+func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ neg := d.decCheckInteger()
+ ui := d.decUint()
+ // check if this number can be converted to an int without overflow
+ var overflow bool
+ if neg {
+ if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
+ d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
+ return
+ }
+ i = -i
+ } else {
+ if i, overflow = chkOvf.SignedInt(ui); overflow {
+ d.d.errorf("cbor: overflow converting %v to signed integer", ui)
+ return
+ }
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("cbor: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ if d.decCheckInteger() {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ ui = d.decUint()
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("cbor: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdFloat16 {
+ f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
+ } else if bd == cborBdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if bd == cborBdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else if bd >= cborBaseUint && bd < cborBaseBytes {
+ f = float64(d.DecodeInt(64))
+ } else {
+ d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
+ return
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("cbor: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *cborDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdTrue {
+ b = true
+ } else if bd == cborBdFalse {
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) ReadMapStart() (length int) {
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteMap {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) ReadArrayStart() (length int) {
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteArray {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) decLen() int {
+ return int(d.decUint())
+}
+
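+// decAppendIndefiniteBytes concatenates the chunks of an indefinite-length
+// byte or text string into bs, growing the buffer geometrically, until the
+// CBOR break code (0xff) terminates the sequence.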
+func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
+ d.bdRead = false
+ for {
+ if d.CheckBreak() {
+ break
+ }
+ if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
+ d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
+ return nil
+ }
+ n := d.decLen()
+ oldLen := len(bs)
+ newLen := oldLen + n
+ if newLen > cap(bs) {
+ bs2 := make([]byte, newLen, 2*cap(bs)+n)
+ copy(bs2, bs)
+ bs = bs2
+ } else {
+ bs = bs[:newLen]
+ }
+ d.r.readb(bs[oldLen:newLen])
+ // bs = append(bs, d.r.readn()...)
+ d.bdRead = false
+ }
+ d.bdRead = false
+ return bs
+}
+
+func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return nil
+ }
+ if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
+ if bs == nil {
+ return d.decAppendIndefiniteBytes(nil)
+ }
+ return d.decAppendIndefiniteBytes(bs[:0])
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *cborDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ u := d.decUint()
+ d.bdRead = false
+ realxtag = u
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ d.d.decode(&re.Value)
+ } else if xtag != realxtag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
+ return
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case cborBdNil:
+ n.v = valueTypeNil
+ case cborBdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case cborBdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case cborBdFloat16, cborBdFloat32:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(true)
+ case cborBdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(false)
+ case cborBdIndefiniteBytes:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case cborBdIndefiniteString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case cborBdIndefiniteArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case cborBdIndefiniteMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ switch {
+ case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint(64)
+ }
+ case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ case d.bd >= cborBaseBytes && d.bd < cborBaseString:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case d.bd >= cborBaseString && d.bd < cborBaseArray:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case d.bd >= cborBaseArray && d.bd < cborBaseMap:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case d.bd >= cborBaseMap && d.bd < cborBaseTag:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
+ n.v = valueTypeExt
+ n.u = d.decUint()
+ n.l = nil
+ // d.bdRead = false
+ // d.d.decode(&re.Value) // handled by decode itself.
+ // decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ return
+ }
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+// -------------------------
+
+// CborHandle is a Handle for the CBOR encoding format,
+// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
+//
+// CBOR is comprehensively supported, including support for:
+// - indefinite-length arrays/maps/bytes/strings
+// - (extension) tags in range 0..0xffff (0 .. 65535)
+// - half, single and double-precision floats
+// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
+// - nil, true, false, ...
+// - arrays and maps, bytes and text strings
+//
+// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
+// Users can implement them as needed (using SetExt), including spec-documented ones:
+// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
+//
+// To encode with indefinite lengths (streaming), users will use
+// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
+//
+// For example, to encode "one-byte" as an indefinite length string:
+// var buf bytes.Buffer
+// e := NewEncoder(&buf, new(CborHandle))
+// buf.WriteByte(CborStreamString)
+// e.MustEncode("one-")
+// e.MustEncode("byte")
+// buf.WriteByte(CborStreamBreak)
+// encodedBytes := buf.Bytes()
+// var vv interface{}
+// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
+// // Now, vv contains the same string "one-byte"
+//
+type CborHandle struct {
+ binaryEncodingType
+ BasicHandle
+}
+
+func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
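+
+// Editor's note: a minimal, hypothetical sketch (not part of the original source)
+// of wiring up one of the spec-documented tags via SetInterfaceExt. The timeExt
+// type is invented for illustration, tag 1 (RFC 7049 epoch-based time) is an
+// assumption, and the reflect/time imports are implied:
+//
+//	type timeExt struct{}
+//
+//	// ConvertExt converts the time.Time into a codec-encodable value (epoch seconds).
+//	func (timeExt) ConvertExt(v interface{}) interface{} {
+//		return v.(*time.Time).Unix()
+//	}
+//
+//	// UpdateExt rebuilds the time.Time from the decoded epoch seconds.
+//	func (timeExt) UpdateExt(dst interface{}, src interface{}) {
+//		*(dst.(*time.Time)) = time.Unix(src.(int64), 0).UTC()
+//	}
+//
+//	var h CborHandle
+//	if err := h.SetInterfaceExt(reflect.TypeOf(time.Time{}), 1, timeExt{}); err != nil {
+//		// handle registration error
+//	}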
+
+func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
+ return &cborEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
+ return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *cborEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *cborDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*cborDecDriver)(nil)
+var _ encDriver = (*cborEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go
new file mode 100644
index 000000000..e30e8e8c1
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decode.go
@@ -0,0 +1,2066 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "time"
+)
+
+// Some tagging information for error messages.
+const (
+ msgBadDesc = "Unrecognized descriptor byte"
+ msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
+)
+
+var (
+ onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct")
+ cannotDecodeIntoNilErr = errors.New("cannot decode into nil")
+)
+
+// decReader abstracts the reading source, allowing implementations that can
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+ unreadn1()
+
+ // readx will use the implementation scratch buffer if possible (i.e. n < len(scratchbuf)),
+ // or just return a view of the []byte being decoded from.
+ // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
+ readx(n int) []byte
+ readb([]byte)
+ readn1() uint8
+ readn1eof() (v uint8, eof bool)
+ numread() int // number of bytes read
+ track()
+ stopTrack() []byte
+}
+
+type decReaderByteScanner interface {
+ io.Reader
+ io.ByteScanner
+}
+
+type decDriver interface {
+ // this will check if the next token is a break.
+ CheckBreak() bool
+ TryDecodeAsNil() bool
+ // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known.
+ ContainerType() (vt valueType)
+ IsBuiltinType(rt uintptr) bool
+ DecodeBuiltin(rt uintptr, v interface{})
+
+ // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+ // For maps and arrays, it will not do the decoding in-band, but will signal
+ // the decoder, so that is done later, by setting the decNaked.valueType field.
+ //
+ // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+ // for extensions, DecodeNaked must read the tag and the []byte if it exists.
+ // if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+ // that stores the subsequent value in-band, and complete reading the RawExt.
+ //
+ // extensions should also use readx to decode them, for efficiency.
+ // kInterface will extract the detached byte slice if it has to pass it outside its realm.
+ DecodeNaked()
+ DecodeInt(bitsize uint8) (i int64)
+ DecodeUint(bitsize uint8) (ui uint64)
+ DecodeFloat(chkOverflow32 bool) (f float64)
+ DecodeBool() (b bool)
+ // DecodeString can also decode symbols.
+ // It looks redundant as DecodeBytes is available.
+ // However, some codecs (e.g. binc) support symbols and can
+ // return a pre-stored string value, meaning that it can bypass
+ // the cost of []byte->string conversion.
+ DecodeString() (s string)
+
+ // DecodeBytes may be called directly, without going through reflection.
+ // Consequently, it must be designed to handle possible nil.
+ DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+ // decodeExt will decode into a *RawExt or into an extension.
+ DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+ // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+ ReadMapStart() int
+ ReadArrayStart() int
+
+ reset()
+ uncacheRead()
+}
+
+type decNoSeparator struct{}
+
+func (_ decNoSeparator) ReadEnd() {}
+
+// func (_ decNoSeparator) uncacheRead() {}
+
+type DecodeOptions struct {
+ // MapType specifies type to use during schema-less decoding of a map in the stream.
+ // If nil, we use map[interface{}]interface{}
+ MapType reflect.Type
+
+ // SliceType specifies type to use during schema-less decoding of an array in the stream.
+ // If nil, we use []interface{}
+ SliceType reflect.Type
+
+ // MaxInitLen defines the maximum initial length that we "make" a collection (string, slice, map, chan).
+ // If 0 or negative, we default to a sensible value based on the size of an element in the collection.
+ //
+ // For example, when decoding, a stream may say that it has 2^64 elements.
+ // We should not automatically provision a slice of that length, to prevent an out-of-memory crash.
+ // Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+ MaxInitLen int
+
+ // If ErrorIfNoField, return an error when decoding a map
+ // from a codec stream into a struct, and no matching struct field is found.
+ ErrorIfNoField bool
+
+ // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+ // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+ ErrorIfNoArrayExpand bool
+
+ // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64).
+ SignedInteger bool
+
+ // MapValueReset controls how we decode into a map value.
+ //
+ // By default, we MAY retrieve the mapping for a key, and then decode into that.
+ // However, especially with big maps, that retrieval may be expensive and unnecessary
+ // if the stream already contains all that is necessary to recreate the value.
+ //
+ // If true, we will never retrieve the previous mapping,
+ // but rather decode into a new value and set that in the map.
+ //
+ // If false, we will retrieve the previous mapping if necessary e.g.
+ // the previous mapping is a pointer, or is a struct or array with pre-set state,
+ // or is an interface.
+ MapValueReset bool
+
+ // InterfaceReset controls how we decode into an interface.
+ //
+ // By default, when we see a field that is an interface{...},
+ // or a map with interface{...} value, we will attempt decoding into the
+ // "contained" value.
+ //
+ // However, this prevents us from reading a string into an interface{}
+ // that formerly contained a number.
+ //
+ // If true, we will decode into a new "blank" value, and set that in the interface.
+ // If false, we will decode into whatever is contained in the interface.
+ InterfaceReset bool
+
+ // InternString controls interning of strings during decoding.
+ //
+ // Some handles, e.g. json, typically will read map keys as strings.
+ // If the set of keys is finite, it may help reduce allocation by
+ // looking them up from a map (rather than allocating them afresh).
+ //
+ // Note: Handles will be smart when using the intern functionality,
+ // so not everything will be interned.
+ InternString bool
+
+ // PreferArrayOverSlice controls whether to decode to an array or a slice.
+ //
+ // This only impacts decoding into a nil interface{}.
+ // Consequently, it has no effect on codecgen.
+ //
+ // *Note*: This only applies if using go1.5 and above,
+ // as it requires reflect.ArrayOf support which was absent before go1.5.
+ PreferArrayOverSlice bool
+}
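+
+// Editor's note: a minimal sketch (not part of the original source) of configuring
+// DecodeOptions. Handles embed BasicHandle, which embeds DecodeOptions, so the
+// fields are set directly on the handle value; the particular values below (and the
+// data byte slice) are illustrative assumptions, not recommendations:
+//
+//	var h CborHandle
+//	h.MapType = reflect.TypeOf(map[string]interface{}(nil)) // schema-less maps as map[string]interface{}
+//	h.MaxInitLen = 1024                                     // bound initial allocations for huge length prefixes
+//	h.ErrorIfNoField = true                                 // error on unknown struct fields
+//	d := NewDecoderBytes(data, &h)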
+
+// ------------------------------------
+
+// ioDecByteScanner implements the Read(), ReadByte(...), UnreadByte(...) methods
+// of io.Reader and io.ByteScanner.
+type ioDecByteScanner struct {
+ r io.Reader
+ l byte // last byte
+ ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread
+ b [1]byte // tiny buffer for reading single bytes
+}
+
+func (z *ioDecByteScanner) Read(p []byte) (n int, err error) {
+ var firstByte bool
+ if z.ls == 1 {
+ z.ls = 2
+ p[0] = z.l
+ if len(p) == 1 {
+ n = 1
+ return
+ }
+ firstByte = true
+ p = p[1:]
+ }
+ n, err = z.r.Read(p)
+ if n > 0 {
+ if err == io.EOF && n == len(p) {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ z.l = p[n-1]
+ z.ls = 2
+ }
+ if firstByte {
+ n++
+ }
+ return
+}
+
+func (z *ioDecByteScanner) ReadByte() (c byte, err error) {
+ n, err := z.Read(z.b[:])
+ if n == 1 {
+ c = z.b[0]
+ if err == io.EOF {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ }
+ return
+}
+
+func (z *ioDecByteScanner) UnreadByte() (err error) {
+ x := z.ls
+ if x == 0 {
+ err = errors.New("cannot unread - nothing has been read")
+ } else if x == 1 {
+ err = errors.New("cannot unread - last byte has not been read")
+ } else if x == 2 {
+ z.ls = 1
+ }
+ return
+}
+
+// ioDecReader is a decReader that reads off an io.Reader
+type ioDecReader struct {
+ br decReaderByteScanner
+ // temp byte array re-used internally for efficiency during read.
+ // shares buffer with Decoder, so we keep size of struct within 8 words.
+ x *[scratchByteArrayLen]byte
+ bs ioDecByteScanner
+ n int // num read
+ tr []byte // tracking bytes read
+ trb bool
+}
+
+func (z *ioDecReader) numread() int {
+ return z.n
+}
+
+func (z *ioDecReader) readx(n int) (bs []byte) {
+ if n <= 0 {
+ return
+ }
+ if n < len(z.x) {
+ bs = z.x[:n]
+ } else {
+ bs = make([]byte, n)
+ }
+ if _, err := io.ReadAtLeast(z.br, bs, n); err != nil {
+ panic(err)
+ }
+ z.n += len(bs)
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+ return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+ if len(bs) == 0 {
+ return
+ }
+ n, err := io.ReadAtLeast(z.br, bs, len(bs))
+ z.n += n
+ if err != nil {
+ panic(err)
+ }
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+}
+
+func (z *ioDecReader) readn1() (b uint8) {
+ b, err := z.br.ReadByte()
+ if err != nil {
+ panic(err)
+ }
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ return b
+}
+
+func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
+ b, err := z.br.ReadByte()
+ if err == nil {
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ } else if err == io.EOF {
+ eof = true
+ } else {
+ panic(err)
+ }
+ return
+}
+
+func (z *ioDecReader) unreadn1() {
+ err := z.br.UnreadByte()
+ if err != nil {
+ panic(err)
+ }
+ z.n--
+ if z.trb {
+ if l := len(z.tr) - 1; l >= 0 {
+ z.tr = z.tr[:l]
+ }
+ }
+}
+
+func (z *ioDecReader) track() {
+ if z.tr != nil {
+ z.tr = z.tr[:0]
+ }
+ z.trb = true
+}
+
+func (z *ioDecReader) stopTrack() (bs []byte) {
+ z.trb = false
+ return z.tr
+}
+
+// ------------------------------------
+
+var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read")
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+ b []byte // data
+ c int // cursor
+ a int // available
+ t int // track start
+}
+
+func (z *bytesDecReader) reset(in []byte) {
+ z.b = in
+ z.a = len(in)
+ z.c = 0
+ z.t = 0
+}
+
+func (z *bytesDecReader) numread() int {
+ return z.c
+}
+
+func (z *bytesDecReader) unreadn1() {
+ if z.c == 0 || len(z.b) == 0 {
+ panic(bytesDecReaderCannotUnreadErr)
+ }
+ z.c--
+ z.a++
+ return
+}
+
+func (z *bytesDecReader) readx(n int) (bs []byte) {
+ // slicing from a non-constant start position is more expensive,
+ // as more computation is required to decipher the pointer start position.
+ // However, we do it only once, and it's better than reslicing both z.b and the return value.
+
+ if n <= 0 {
+ } else if z.a == 0 {
+ panic(io.EOF)
+ } else if n > z.a {
+ panic(io.ErrUnexpectedEOF)
+ } else {
+ c0 := z.c
+ z.c = c0 + n
+ z.a = z.a - n
+ bs = z.b[c0:z.c]
+ }
+ return
+}
+
+func (z *bytesDecReader) readn1() (v uint8) {
+ if z.a == 0 {
+ panic(io.EOF)
+ }
+ v = z.b[z.c]
+ z.c++
+ z.a--
+ return
+}
+
+func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
+ if z.a == 0 {
+ eof = true
+ return
+ }
+ v = z.b[z.c]
+ z.c++
+ z.a--
+ return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+ copy(bs, z.readx(len(bs)))
+}
+
+func (z *bytesDecReader) track() {
+ z.t = z.c
+}
+
+func (z *bytesDecReader) stopTrack() (bs []byte) {
+ return z.b[z.t:z.c]
+}
+
+// ------------------------------------
+
+type decFnInfo struct {
+ d *Decoder
+ ti *typeInfo
+ xfFn Ext
+ xfTag uint64
+ seq seqType
+}
+
+// ----------------------------------------
+
+type decFn struct {
+ i decFnInfo
+ f func(*decFnInfo, reflect.Value)
+}
+
+func (f *decFnInfo) builtin(rv reflect.Value) {
+ f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface())
+}
+
+func (f *decFnInfo) rawExt(rv reflect.Value) {
+ f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil)
+}
+
+func (f *decFnInfo) raw(rv reflect.Value) {
+ rv.SetBytes(f.d.raw())
+}
+
+func (f *decFnInfo) ext(rv reflect.Value) {
+ f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn)
+}
+
+func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) {
+ if indir == -1 {
+ v = rv.Addr().Interface()
+ } else if indir == 0 {
+ v = rv.Interface()
+ } else {
+ for j := int8(0); j < indir; j++ {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+ v = rv.Interface()
+ }
+ return
+}
+
+func (f *decFnInfo) selferUnmarshal(rv reflect.Value) {
+ f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d)
+}
+
+func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) {
+ bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler)
+ xbs := f.d.d.DecodeBytes(nil, false, true)
+ if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) textUnmarshal(rv reflect.Value) {
+ tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler)
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) {
+ tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler)
+ // bs := f.d.d.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) kErr(rv reflect.Value) {
+ f.d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+func (f *decFnInfo) kString(rv reflect.Value) {
+ rv.SetString(f.d.d.DecodeString())
+}
+
+func (f *decFnInfo) kBool(rv reflect.Value) {
+ rv.SetBool(f.d.d.DecodeBool())
+}
+
+func (f *decFnInfo) kInt(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(intBitsize))
+}
+
+func (f *decFnInfo) kInt64(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(64))
+}
+
+func (f *decFnInfo) kInt32(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(32))
+}
+
+func (f *decFnInfo) kInt8(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(8))
+}
+
+func (f *decFnInfo) kInt16(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(16))
+}
+
+func (f *decFnInfo) kFloat32(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(true))
+}
+
+func (f *decFnInfo) kFloat64(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(false))
+}
+
+func (f *decFnInfo) kUint8(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(8))
+}
+
+func (f *decFnInfo) kUint64(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(64))
+}
+
+func (f *decFnInfo) kUint(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUintptr(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUint32(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(32))
+}
+
+func (f *decFnInfo) kUint16(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(16))
+}
+
+// func (f *decFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// rv.Set(reflect.New(rv.Type().Elem()))
+// }
+// f.d.decodeValue(rv.Elem())
+// }
+
+// var kIntfCtr uint64
+
+func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
+ // nil interface:
+ // use some heuristics to decode it appropriately
+ // based on the detected next value in the stream.
+ d := f.d
+ d.d.DecodeNaked()
+ n := &d.n
+ if n.v == valueTypeNil {
+ return
+ }
+ // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
+ // if num := f.ti.rt.NumMethod(); num > 0 {
+ if f.ti.numMeth > 0 {
+ d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
+ return
+ }
+ // var useRvn bool
+ switch n.v {
+ case valueTypeMap:
+ // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp {
+ // } else if d.h.MapType == mapStrIntfTyp { // for json performance
+ // }
+ if d.mtid == 0 || d.mtid == mapIntfIntfTypId {
+ l := len(n.ms)
+ n.ms = append(n.ms, nil)
+ var v2 interface{} = &n.ms[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ms = n.ms[:l]
+ } else if d.mtid == mapStrIntfTypId { // for json performance
+ l := len(n.ns)
+ n.ns = append(n.ns, nil)
+ var v2 interface{} = &n.ns[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ns = n.ns[:l]
+ } else {
+ rvn = reflect.New(d.h.MapType).Elem()
+ d.decodeValue(rvn, nil)
+ }
+ case valueTypeArray:
+ // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp {
+ if d.stid == 0 || d.stid == intfSliceTypId {
+ l := len(n.ss)
+ n.ss = append(n.ss, nil)
+ var v2 interface{} = &n.ss[l]
+ d.decode(v2)
+ n.ss = n.ss[:l]
+ rvn = reflect.ValueOf(v2).Elem()
+ if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
+ rvn = reflectArrayOf(rvn)
+ }
+ } else {
+ rvn = reflect.New(d.h.SliceType).Elem()
+ d.decodeValue(rvn, nil)
+ }
+ case valueTypeExt:
+ var v interface{}
+ tag, bytes := n.u, n.l // calling decode below might taint the values
+ if bytes == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ d.decode(v2)
+ v = *v2
+ n.is = n.is[:l]
+ }
+ bfn := d.h.getExtForTag(tag)
+ if bfn == nil {
+ var re RawExt
+ re.Tag = tag
+ re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
+ rvn = reflect.ValueOf(re)
+ } else {
+ rvnA := reflect.New(bfn.rt)
+ rvn = rvnA.Elem()
+ if bytes != nil {
+ bfn.ext.ReadExt(rvnA.Interface(), bytes)
+ } else {
+ bfn.ext.UpdateExt(rvnA.Interface(), v)
+ }
+ }
+ case valueTypeNil:
+ // no-op
+ case valueTypeInt:
+ rvn = reflect.ValueOf(&n.i).Elem()
+ case valueTypeUint:
+ rvn = reflect.ValueOf(&n.u).Elem()
+ case valueTypeFloat:
+ rvn = reflect.ValueOf(&n.f).Elem()
+ case valueTypeBool:
+ rvn = reflect.ValueOf(&n.b).Elem()
+ case valueTypeString, valueTypeSymbol:
+ rvn = reflect.ValueOf(&n.s).Elem()
+ case valueTypeBytes:
+ rvn = reflect.ValueOf(&n.l).Elem()
+ case valueTypeTimestamp:
+ rvn = reflect.ValueOf(&n.t).Elem()
+ default:
+ panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v))
+ }
+ return
+}
+
+func (f *decFnInfo) kInterface(rv reflect.Value) {
+ // debugf("\t===> kInterface")
+
+ // Note:
+ // A consequence of how kInterface works is that
+ // if an interface already contains something, we try
+ // to decode into what was there before.
+ // We do not replace with a generic value (as got from decodeNaked).
+
+ var rvn reflect.Value
+ if rv.IsNil() {
+ rvn = f.kInterfaceNaked()
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ }
+ } else if f.d.h.InterfaceReset {
+ rvn = f.kInterfaceNaked()
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ } else {
+ // reset to zero value based on current type in there.
+ rv.Set(reflect.Zero(rv.Elem().Type()))
+ }
+ } else {
+ rvn = rv.Elem()
+ // Note: interface{} is settable, but underlying type may not be.
+ // Consequently, we have to set the reflect.Value directly.
+ // if underlying type is settable (e.g. ptr or interface),
+ // we just decode into it.
+ // Else we create a settable value, decode into it, and set on the interface.
+ if rvn.CanSet() {
+ f.d.decodeValue(rvn, nil)
+ } else {
+ rvn2 := reflect.New(rvn.Type()).Elem()
+ rvn2.Set(rvn)
+ f.d.decodeValue(rvn2, nil)
+ rv.Set(rvn2)
+ }
+ }
+}
+
+func (f *decFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ d := f.d
+ dd := d.d
+ cr := d.cr
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeMap {
+ containerLen := dd.ReadMapStart()
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+ tisfi := fti.sfi
+ hasLen := containerLen >= 0
+ if hasLen {
+ for j := 0; j < containerLen; j++ {
+ // rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
+ // rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ si := tisfi[k]
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ } else {
+ d.structFieldNotFound(-1, rvkencname)
+ }
+ }
+ } else {
+ for j := 0; !dd.CheckBreak(); j++ {
+ // rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
+ // rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ si := tisfi[k]
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ } else {
+ d.structFieldNotFound(-1, rvkencname)
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ } else if ctyp == valueTypeArray {
+ containerLen := dd.ReadArrayStart()
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ return
+ }
+ // Not much gain from doing it two ways for array.
+ // Arrays are not used as much for structs.
+ hasLen := containerLen >= 0
+ for j, si := range fti.sfip {
+ if hasLen {
+ if j == containerLen {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ }
+ if containerLen > len(fti.sfip) {
+ // read remaining values and throw away
+ for j := len(fti.sfip); j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ d.structFieldNotFound(j, "")
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ } else {
+ f.d.error(onlyMapOrArrayCanDecodeIntoStructErr)
+ return
+ }
+}
+
+func (f *decFnInfo) kSlice(rv reflect.Value) {
+ // A slice can be set from a map or array in stream.
+ // This way, the order can be kept (as order is lost with map).
+ ti := f.ti
+ d := f.d
+ dd := d.d
+ rtelem0 := ti.rt.Elem()
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeBytes || ctyp == valueTypeString {
+ // you can only decode bytes or string in the stream into a slice or array of bytes
+ if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
+ f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt)
+ }
+ if f.seq == seqTypeChan {
+ bs2 := dd.DecodeBytes(nil, false, true)
+ ch := rv.Interface().(chan<- byte)
+ for _, b := range bs2 {
+ ch <- b
+ }
+ } else {
+ rvbs := rv.Bytes()
+ bs2 := dd.DecodeBytes(rvbs, false, false)
+ if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
+ if rv.CanSet() {
+ rv.SetBytes(bs2)
+ } else {
+ copy(rvbs, bs2)
+ }
+ }
+ }
+ return
+ }
+
+ // array := f.seq == seqTypeChan
+
+ slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
+
+ // an array can never return a nil slice, so no need to check f.array here.
+ if containerLenS == 0 {
+ if f.seq == seqTypeSlice {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeSlice(ti.rt, 0, 0))
+ } else {
+ rv.SetLen(0)
+ }
+ } else if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeChan(ti.rt, 0))
+ }
+ }
+ slh.End()
+ return
+ }
+
+ rtelem := rtelem0
+ for rtelem.Kind() == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ }
+ fn := d.getDecFn(rtelem, true, true)
+
+ var rv0, rv9 reflect.Value
+ rv0 = rv
+ rvChanged := false
+
+ // for j := 0; j < containerLenS; j++ {
+ var rvlen int
+ if containerLenS > 0 { // hasLen
+ if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
+ rv.Set(reflect.MakeChan(ti.rt, rvlen))
+ }
+ // handle chan specially:
+ for j := 0; j < containerLenS; j++ {
+ rv9 = reflect.New(rtelem0).Elem()
+ slh.ElemContainerState(j)
+ d.decodeValue(rv9, fn)
+ rv.Send(rv9)
+ }
+ } else { // slice or array
+ var truncated bool // says len of sequence is not the same as the expected number of elements
+ numToRead := containerLenS // if truncated, reset numToRead
+
+ rvcap := rv.Cap()
+ rvlen = rv.Len()
+ if containerLenS > rvcap {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, containerLenS)
+ } else {
+ oldRvlenGtZero := rvlen > 0
+ rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
+ if truncated {
+ if rvlen <= rvcap {
+ rv.SetLen(rvlen)
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
+ reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
+ }
+ rvcap = rvlen
+ }
+ numToRead = rvlen
+ } else if containerLenS != rvlen {
+ if f.seq == seqTypeSlice {
+ rv.SetLen(containerLenS)
+ rvlen = containerLenS
+ }
+ }
+ j := 0
+ // we read up to the numToRead
+ for ; j < numToRead; j++ {
+ slh.ElemContainerState(j)
+ d.decodeValue(rv.Index(j), fn)
+ }
+
+ // if slice, expand and read up to containerLenS (or EOF) iff truncated
+ // if array, swallow all the rest.
+
+ if f.seq == seqTypeArray {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ } else if truncated { // slice was truncated, as chan NOT in this block
+ for ; j < containerLenS; j++ {
+ rv = expandSliceValue(rv, 1)
+ rv9 = rv.Index(j)
+ if resetSliceElemToZeroValue {
+ rv9.Set(reflect.Zero(rtelem0))
+ }
+ slh.ElemContainerState(j)
+ d.decodeValue(rv9, fn)
+ }
+ }
+ }
+ } else {
+ rvlen = rv.Len()
+ j := 0
+ for ; !dd.CheckBreak(); j++ {
+ if f.seq == seqTypeChan {
+ slh.ElemContainerState(j)
+ rv9 = reflect.New(rtelem0).Elem()
+ d.decodeValue(rv9, fn)
+ rv.Send(rv9)
+ } else {
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= rvlen {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, j+1)
+ decodeIntoBlank = true
+ } else { // if f.seq == seqTypeSlice
+ // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs
+ rv = expandSliceValue(rv, 1)
+ rv9 = rv.Index(j)
+ // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0))
+ if resetSliceElemToZeroValue {
+ rv9.Set(reflect.Zero(rtelem0))
+ }
+ rvlen++
+ rvChanged = true
+ }
+ } else { // slice or array
+ rv9 = rv.Index(j)
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else { // seqTypeSlice
+ d.decodeValue(rv9, fn)
+ }
+ }
+ }
+ if f.seq == seqTypeSlice {
+ if j < rvlen {
+ rv.SetLen(j)
+ } else if j == 0 && rv.IsNil() {
+ rv = reflect.MakeSlice(ti.rt, 0, 0)
+ rvChanged = true
+ }
+ }
+ }
+ slh.End()
+
+ if rvChanged {
+ rv0.Set(rv)
+ }
+}
+
+func (f *decFnInfo) kArray(rv reflect.Value) {
+ // f.d.decodeValue(rv.Slice(0, rv.Len()))
+ f.kSlice(rv.Slice(0, rv.Len()))
+}
+
+func (f *decFnInfo) kMap(rv reflect.Value) {
+ d := f.d
+ dd := d.d
+ containerLen := dd.ReadMapStart()
+ cr := d.cr
+ ti := f.ti
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(ti.rt))
+ }
+
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+
+ ktype, vtype := ti.rt.Key(), ti.rt.Elem()
+ ktypeId := reflect.ValueOf(ktype).Pointer()
+ vtypeKind := vtype.Kind()
+ var keyFn, valFn *decFn
+ var xtyp reflect.Type
+ for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
+ }
+ keyFn = d.getDecFn(xtyp, true, true)
+ for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
+ }
+ valFn = d.getDecFn(xtyp, true, true)
+ var mapGet, mapSet bool
+ if !f.d.h.MapValueReset {
+ // if pointer, mapGet = true
+ // if interface, mapGet = true if !DecodeNakedAlways (else false)
+ // if builtin, mapGet = false
+ // else mapGet = true
+ if vtypeKind == reflect.Ptr {
+ mapGet = true
+ } else if vtypeKind == reflect.Interface {
+ if !f.d.h.InterfaceReset {
+ mapGet = true
+ }
+ } else if !isImmutableKind(vtypeKind) {
+ mapGet = true
+ }
+ }
+
+ var rvk, rvv, rvz reflect.Value
+
+ // for j := 0; j < containerLen; j++ {
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.decodeValue(rvk, keyFn)
+
+ // special case if a byte array.
+ if ktypeId == intfTypId {
+ rvk = rvk.Elem()
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk.Bytes()))
+ }
+ }
+ mapSet = true // set to false if we do a get, and it's a pointer, and it exists
+ if mapGet {
+ rvv = rv.MapIndex(rvk)
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.decodeValue(rvv, valFn)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
+ }
+ } else {
+ for j := 0; !dd.CheckBreak(); j++ {
+ rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.decodeValue(rvk, keyFn)
+
+ // special case if a byte array.
+ if ktypeId == intfTypId {
+ rvk = rvk.Elem()
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk.Bytes()))
+ }
+ }
+ mapSet = true // set to false if we do a get, and it's a pointer, and it exists
+ if mapGet {
+ rvv = rv.MapIndex(rvk)
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.decodeValue(rvv, valFn)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+type decRtidFn struct {
+ rtid uintptr
+ fn decFn
+}
+
+// decNaked is used to keep track of the primitives decoded.
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+// However, RawExt is, and we accommodate extensions that decode
+// RawExt from DecodeNaked, but need to decode the value subsequently.
+// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
+//
+// However, decNaked also keeps some arrays of default maps and slices
+// used in DecodeNaked. This way, we can get a pointer to it
+// without causing a new heap allocation.
+//
+// kInterfaceNaked will ensure that there is no allocation for the common
+// uses.
+type decNaked struct {
+ // r RawExt // used for RawExt, uint, []byte.
+ u uint64
+ i int64
+ f float64
+ l []byte
+ s string
+ t time.Time
+ b bool
+ v valueType
+
+ // stacks for reducing allocation
+ is []interface{}
+ ms []map[interface{}]interface{}
+ ns []map[string]interface{}
+ ss [][]interface{}
+ // rs []RawExt
+
+ // keep arrays at the bottom? Chances are that they are not used much.
+ ia [4]interface{}
+ ma [4]map[interface{}]interface{}
+ na [4]map[string]interface{}
+ sa [4][]interface{}
+ // ra [2]RawExt
+}
+
+func (n *decNaked) reset() {
+ if n.ss != nil {
+ n.ss = n.ss[:0]
+ }
+ if n.is != nil {
+ n.is = n.is[:0]
+ }
+ if n.ms != nil {
+ n.ms = n.ms[:0]
+ }
+ if n.ns != nil {
+ n.ns = n.ns[:0]
+ }
+}
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+ // hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+ // Try to put things that go together to fit within a cache line (8 words).
+
+ d decDriver
+ // NOTE: Decoder shouldn't call its read methods,
+ // as the handler MAY need to do some coordination.
+ r decReader
+ // sa [initCollectionCap]decRtidFn
+ h *BasicHandle
+ hh Handle
+
+ be bool // is binary encoding
+ bytes bool // is bytes reader
+ js bool // is json handle
+
+ rb bytesDecReader
+ ri ioDecReader
+ cr containerStateRecv
+
+ s []decRtidFn
+ f map[uintptr]*decFn
+
+ // _ uintptr // for alignment purposes, so next one starts from a cache line
+
+ // cache the mapTypeId and sliceTypeId for faster comparisons
+ mtid uintptr
+ stid uintptr
+
+ n decNaked
+ b [scratchByteArrayLen]byte
+ is map[string]string // used for interning strings
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory-buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.Reset(r)
+ return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.ResetBytes(in)
+ return d
+}
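+
+// Editor's note: a short, hedged usage sketch for the two constructors above;
+// conn and encodedBytes are assumed inputs, used only for illustration:
+//
+//	// From an io.Reader: wrap in a buffered reader, per the advice above.
+//	dec := NewDecoder(bufio.NewReader(conn), new(CborHandle))
+//	var v interface{}
+//	if err := dec.Decode(&v); err != nil {
+//		// handle decode error
+//	}
+//
+//	// Directly from a byte slice, with zero copying:
+//	var m map[string]interface{}
+//	err := NewDecoderBytes(encodedBytes, new(CborHandle)).Decode(&m)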
+
+func newDecoder(h Handle) *Decoder {
+ d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ n := &d.n
+ // n.rs = n.ra[:0]
+ n.ms = n.ma[:0]
+ n.is = n.ia[:0]
+ n.ns = n.na[:0]
+ n.ss = n.sa[:0]
+ _, d.js = h.(*JsonHandle)
+ if d.h.InternString {
+ d.is = make(map[string]string, 32)
+ }
+ d.d = h.newDecDriver(d)
+ d.cr, _ = d.d.(containerStateRecv)
+ // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri})
+ return d
+}
+
+func (d *Decoder) resetCommon() {
+ d.n.reset()
+ d.d.reset()
+ // reset all things which were cached from the Handle,
+ // but could be changed.
+ d.mtid, d.stid = 0, 0
+ if d.h.MapType != nil {
+ d.mtid = reflect.ValueOf(d.h.MapType).Pointer()
+ }
+ if d.h.SliceType != nil {
+ d.stid = reflect.ValueOf(d.h.SliceType).Pointer()
+ }
+}
+
+func (d *Decoder) Reset(r io.Reader) {
+ d.ri.x = &d.b
+ // d.s = d.sa[:0]
+ d.ri.bs.r = r
+ var ok bool
+ d.ri.br, ok = r.(decReaderByteScanner)
+ if !ok {
+ d.ri.br = &d.ri.bs
+ }
+ d.r = &d.ri
+ d.resetCommon()
+}
+
+func (d *Decoder) ResetBytes(in []byte) {
+ // d.s = d.sa[:0]
+ d.rb.reset(in)
+ d.r = &d.rb
+ d.resetCommon()
+}
+
+// func (d *Decoder) sendContainerState(c containerState) {
+// if d.cr != nil {
+// d.cr.sendContainerState(c)
+// }
+// }
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+// // Decoding into a non-nil typed value
+// var f float32
+// err = codec.NewDecoder(r, handle).Decode(&f)
+//
+// // Decoding into nil interface
+// var v interface{}
+// dec := codec.NewDecoder(r, handle)
+// err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+// - Numbers are decoded as float64, int64 or uint64.
+// - Other values are decoded appropriately depending on the type:
+// bool, string, []byte, time.Time, etc
+// - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of decoding is based on the
+// type of the value. When a value is seen:
+// - If an extension is registered for it, call that extension function
+// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+// - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+// - A map can be decoded from a stream map, by updating matching keys.
+// - A slice can be decoded from a stream array,
+// by updating the first n elements, where n is length of the stream.
+// - A slice can be decoded from a stream map, by decoding as if
+// it contains a sequence of key-value pairs.
+// - A struct can be decoded from a stream map, by updating matching fields.
+// - A struct can be decoded from a stream array,
+// by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+func (d *Decoder) Decode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ d.decode(v)
+ return
+}
+
+// this is not a smart swallow, as it allocates objects and does unnecessary work.
+func (d *Decoder) swallowViaHammer() {
+ var blank interface{}
+ d.decodeValue(reflect.ValueOf(&blank).Elem(), nil)
+}
+
+func (d *Decoder) swallow() {
+ // smarter decode that just swallows the content
+ dd := d.d
+ if dd.TryDecodeAsNil() {
+ return
+ }
+ cr := d.cr
+ switch dd.ContainerType() {
+ case valueTypeMap:
+ containerLen := dd.ReadMapStart()
+ clenGtEqualZero := containerLen >= 0
+ for j := 0; ; j++ {
+ if clenGtEqualZero {
+ if j >= containerLen {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.swallow()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.swallow()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ case valueTypeArray:
+ containerLenS := dd.ReadArrayStart()
+ clenGtEqualZero := containerLenS >= 0
+ for j := 0; ; j++ {
+ if clenGtEqualZero {
+ if j >= containerLenS {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ d.swallow()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ case valueTypeBytes:
+ dd.DecodeBytes(d.b[:], false, true)
+ case valueTypeString:
+ dd.DecodeBytes(d.b[:], true, true)
+ // dd.DecodeStringAsBytes(d.b[:])
+ default:
+ // these are all primitives, which we can get from decodeNaked
+ // if RawExt using Value, complete the processing.
+ dd.DecodeNaked()
+ if n := &d.n; n.v == valueTypeExt && n.l == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ d.decode(v2)
+ n.is = n.is[:l]
+ }
+ }
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight into the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+ d.decode(v)
+}
+
+func (d *Decoder) decode(iv interface{}) {
+ // if ics, ok := iv.(Selfer); ok {
+ // ics.CodecDecodeSelf(d)
+ // return
+ // }
+
+ if d.d.TryDecodeAsNil() {
+ switch v := iv.(type) {
+ case nil:
+ case *string:
+ *v = ""
+ case *bool:
+ *v = false
+ case *int:
+ *v = 0
+ case *int8:
+ *v = 0
+ case *int16:
+ *v = 0
+ case *int32:
+ *v = 0
+ case *int64:
+ *v = 0
+ case *uint:
+ *v = 0
+ case *uint8:
+ *v = 0
+ case *uint16:
+ *v = 0
+ case *uint32:
+ *v = 0
+ case *uint64:
+ *v = 0
+ case *float32:
+ *v = 0
+ case *float64:
+ *v = 0
+ case *[]uint8:
+ *v = nil
+ case *Raw:
+ *v = nil
+ case reflect.Value:
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
+ v = v.Elem()
+ if v.IsValid() {
+ v.Set(reflect.Zero(v.Type()))
+ }
+ default:
+ rv := reflect.ValueOf(iv)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
+ rv = rv.Elem()
+ if rv.IsValid() {
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ }
+ return
+ }
+
+ switch v := iv.(type) {
+ case nil:
+ d.error(cannotDecodeIntoNilErr)
+ return
+
+ case Selfer:
+ v.CodecDecodeSelf(d)
+
+ case reflect.Value:
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
+ d.decodeValueNotNil(v.Elem(), nil)
+
+ case *string:
+ *v = d.d.DecodeString()
+ case *bool:
+ *v = d.d.DecodeBool()
+ case *int:
+ *v = int(d.d.DecodeInt(intBitsize))
+ case *int8:
+ *v = int8(d.d.DecodeInt(8))
+ case *int16:
+ *v = int16(d.d.DecodeInt(16))
+ case *int32:
+ *v = int32(d.d.DecodeInt(32))
+ case *int64:
+ *v = d.d.DecodeInt(64)
+ case *uint:
+ *v = uint(d.d.DecodeUint(uintBitsize))
+ case *uint8:
+ *v = uint8(d.d.DecodeUint(8))
+ case *uint16:
+ *v = uint16(d.d.DecodeUint(16))
+ case *uint32:
+ *v = uint32(d.d.DecodeUint(32))
+ case *uint64:
+ *v = d.d.DecodeUint(64)
+ case *float32:
+ *v = float32(d.d.DecodeFloat(true))
+ case *float64:
+ *v = d.d.DecodeFloat(false)
+ case *[]uint8:
+ *v = d.d.DecodeBytes(*v, false, false)
+
+ case *Raw:
+ *v = d.raw()
+
+ case *interface{}:
+ d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil)
+
+ default:
+ if !fastpathDecodeTypeSwitch(iv, d) {
+ d.decodeI(iv, true, false, false, false)
+ }
+ }
+}
+
+func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) {
+ if tryNil && d.d.TryDecodeAsNil() {
+ // No need to check recursively whether it is a pointer,
+ // to determine whether to set the value to nil.
+ // Just always set the value to its zero value.
+ if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ return
+ }
+
+ // If stream is not containing a nil value, then we can deref to the base
+ // non-pointer value, and decode into that.
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+ return rv, true
+}
+
+func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) {
+ rv := reflect.ValueOf(iv)
+ if checkPtr {
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
+ }
+ rv, proceed := d.preDecodeValue(rv, tryNil)
+ if proceed {
+ fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer)
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) {
+ if rv, proceed := d.preDecodeValue(rv, true); proceed {
+ if fn == nil {
+ fn = d.getDecFn(rv.Type(), true, true)
+ }
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) {
+ if rv, proceed := d.preDecodeValue(rv, false); proceed {
+ if fn == nil {
+ fn = d.getDecFn(rv.Type(), true, true)
+ }
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) {
+ rtid := reflect.ValueOf(rt).Pointer()
+
+ // retrieve or register a focused function for this type,
+ // to eliminate the need to do the retrieval multiple times
+
+ // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) }
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = d.f[rtid]
+ } else {
+ for i := range d.s {
+ v := &(d.s[i])
+ if v.rtid == rtid {
+ fn, ok = &(v.fn), true
+ break
+ }
+ }
+ }
+ if ok {
+ return
+ }
+
+ if useMapForCodecCache {
+ if d.f == nil {
+ d.f = make(map[uintptr]*decFn, initCollectionCap)
+ }
+ fn = new(decFn)
+ d.f[rtid] = fn
+ } else {
+ if d.s == nil {
+ d.s = make([]decRtidFn, 0, initCollectionCap)
+ }
+ d.s = append(d.s, decRtidFn{rtid: rtid})
+ fn = &(d.s[len(d.s)-1]).fn
+ }
+
+ // debugf("\tCreating new dec fn for type: %v\n", rt)
+ ti := d.h.getTypeInfo(rtid, rt)
+ fi := &(fn.i)
+ fi.d = d
+ fi.ti = ti
+
+ // An extension can be registered for any type, regardless of the Kind
+ // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.)
+ //
+ // We can't check if it's an extension byte here first, because the user may have
+ // registered a pointer or non-pointer type, meaning we may have to recurse first
+ // before matching a mapped type, even though the extension byte is already detected.
+ //
+ // NOTE: if decoding into a nil interface{}, we return a non-nil
+ // value, even if the container registers a length of 0.
+ if checkCodecSelfer && ti.cs {
+ fn.f = (*decFnInfo).selferUnmarshal
+ } else if rtid == rawExtTypId {
+ fn.f = (*decFnInfo).rawExt
+ } else if rtid == rawTypId {
+ fn.f = (*decFnInfo).raw
+ } else if d.d.IsBuiltinType(rtid) {
+ fn.f = (*decFnInfo).builtin
+ } else if xfFn := d.h.getExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+ fn.f = (*decFnInfo).ext
+ } else if supportMarshalInterfaces && d.be && ti.bunm {
+ fn.f = (*decFnInfo).binaryUnmarshal
+ } else if supportMarshalInterfaces && !d.be && d.js && ti.junm {
+ // If JSON, we should check jsonUnmarshal before textUnmarshal
+ fn.f = (*decFnInfo).jsonUnmarshal
+ } else if supportMarshalInterfaces && !d.be && ti.tunm {
+ fn.f = (*decFnInfo).textUnmarshal
+ } else {
+ rk := rt.Kind()
+ if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+ if rt.PkgPath() == "" {
+ if idx := fastpathAV.index(rtid); idx != -1 {
+ fn.f = fastpathAV[idx].decfn
+ }
+ } else {
+ // use mapping for underlying type if there
+ ok = false
+ var rtu reflect.Type
+ if rk == reflect.Map {
+ rtu = reflect.MapOf(rt.Key(), rt.Elem())
+ } else {
+ rtu = reflect.SliceOf(rt.Elem())
+ }
+ rtuid := reflect.ValueOf(rtu).Pointer()
+ if idx := fastpathAV.index(rtuid); idx != -1 {
+ xfnf := fastpathAV[idx].decfn
+ xrt := fastpathAV[idx].rt
+ fn.f = func(xf *decFnInfo, xrv reflect.Value) {
+ // xfnf(xf, xrv.Convert(xrt))
+ xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem())
+ }
+ }
+ }
+ }
+ if fn.f == nil {
+ switch rk {
+ case reflect.String:
+ fn.f = (*decFnInfo).kString
+ case reflect.Bool:
+ fn.f = (*decFnInfo).kBool
+ case reflect.Int:
+ fn.f = (*decFnInfo).kInt
+ case reflect.Int64:
+ fn.f = (*decFnInfo).kInt64
+ case reflect.Int32:
+ fn.f = (*decFnInfo).kInt32
+ case reflect.Int8:
+ fn.f = (*decFnInfo).kInt8
+ case reflect.Int16:
+ fn.f = (*decFnInfo).kInt16
+ case reflect.Float32:
+ fn.f = (*decFnInfo).kFloat32
+ case reflect.Float64:
+ fn.f = (*decFnInfo).kFloat64
+ case reflect.Uint8:
+ fn.f = (*decFnInfo).kUint8
+ case reflect.Uint64:
+ fn.f = (*decFnInfo).kUint64
+ case reflect.Uint:
+ fn.f = (*decFnInfo).kUint
+ case reflect.Uint32:
+ fn.f = (*decFnInfo).kUint32
+ case reflect.Uint16:
+ fn.f = (*decFnInfo).kUint16
+ // case reflect.Ptr:
+ // fn.f = (*decFnInfo).kPtr
+ case reflect.Uintptr:
+ fn.f = (*decFnInfo).kUintptr
+ case reflect.Interface:
+ fn.f = (*decFnInfo).kInterface
+ case reflect.Struct:
+ fn.f = (*decFnInfo).kStruct
+ case reflect.Chan:
+ fi.seq = seqTypeChan
+ fn.f = (*decFnInfo).kSlice
+ case reflect.Slice:
+ fi.seq = seqTypeSlice
+ fn.f = (*decFnInfo).kSlice
+ case reflect.Array:
+ fi.seq = seqTypeArray
+ fn.f = (*decFnInfo).kArray
+ case reflect.Map:
+ fn.f = (*decFnInfo).kMap
+ default:
+ fn.f = (*decFnInfo).kErr
+ }
+ }
+ }
+
+ return
+}
+
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+ // NOTE: rvkencname may be a stringView, so don't pass it to another function.
+ if d.h.ErrorIfNoField {
+ if index >= 0 {
+ d.errorf("no matching struct field found when decoding stream array at index %v", index)
+ return
+ } else if rvkencname != "" {
+ d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+ return
+ }
+ }
+ d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+ if d.h.ErrorIfNoArrayExpand {
+ d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+ }
+}
+
+func (d *Decoder) chkPtrValue(rv reflect.Value) {
+ // We can only decode into a non-nil pointer
+ if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+ return
+ }
+ d.errNotValidPtrValue(rv)
+}
+
+func (d *Decoder) errNotValidPtrValue(rv reflect.Value) {
+ if !rv.IsValid() {
+ d.error(cannotDecodeIntoNilErr)
+ return
+ }
+ if !rv.CanInterface() {
+ d.errorf("cannot decode into a value without an interface: %v", rv)
+ return
+ }
+ rvi := rv.Interface()
+ d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi)
+}
+
+func (d *Decoder) error(err error) {
+ panic(err)
+}
+
+func (d *Decoder) errorf(format string, params ...interface{}) {
+ params2 := make([]interface{}, len(params)+1)
+ params2[0] = d.r.numread()
+ copy(params2[1:], params)
+ err := fmt.Errorf("[pos %d]: "+format, params2...)
+ panic(err)
+}
+
+func (d *Decoder) string(v []byte) (s string) {
+ if d.is != nil {
+ s, ok := d.is[string(v)] // no allocation here: the compiler optimizes map lookups keyed by string([]byte).
+ if !ok {
+ s = string(v)
+ d.is[s] = s
+ }
+ return s
+ }
+ return string(v) // don't return stringView, as we need a real string here.
+}
+
+func (d *Decoder) intern(s string) {
+ if d.is != nil {
+ d.is[s] = s
+ }
+}
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() []byte {
+ d.d.uncacheRead()
+ d.r.track()
+ d.swallow()
+ return d.r.stopTrack()
+}
+
+func (d *Decoder) raw() []byte {
+ // ensure that this is not a view into the bytes
+ // i.e. make new copy always.
+ bs := d.nextValueBytes()
+ bs2 := make([]byte, len(bs))
+ copy(bs2, bs)
+ return bs2
+}
+
+// --------------------------------------------------
+
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
+type decSliceHelper struct {
+ d *Decoder
+ // ct valueType
+ array bool
+}
+
+func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
+ dd := d.d
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeArray {
+ x.array = true
+ clen = dd.ReadArrayStart()
+ } else if ctyp == valueTypeMap {
+ clen = dd.ReadMapStart() * 2
+ } else {
+ d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp)
+ }
+ // x.ct = ctyp
+ x.d = d
+ return
+}
+
+func (x decSliceHelper) End() {
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayEnd)
+ } else {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (x decSliceHelper) ElemContainerState(index int) {
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayElem)
+ } else {
+ if index%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+}
+
+func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) {
+ if clen == 0 {
+ return zeroByteSlice
+ }
+ if len(bs) == clen {
+ bsOut = bs
+ r.readb(bsOut)
+ } else if cap(bs) >= clen {
+ bsOut = bs[:clen]
+ r.readb(bsOut)
+ } else {
+ // bsOut = make([]byte, clen)
+ len2, _ := decInferLen(clen, maxInitLen, 1)
+ bsOut = make([]byte, len2)
+ r.readb(bsOut)
+ for len2 < clen {
+ len3, _ := decInferLen(clen-len2, maxInitLen, 1)
+ // fmt.Printf(">>>>> TESTING: in loop: clen: %v, maxInitLen: %v, len2: %v, len3: %v\n", clen, maxInitLen, len2, len3)
+ bs3 := bsOut
+ bsOut = make([]byte, len2+len3)
+ copy(bsOut, bs3)
+ r.readb(bsOut[len2:])
+ len2 += len3
+ }
+ }
+ return
+}
+
+func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
+ if xlen := len(in); xlen > 0 {
+ if isBytesReader || xlen <= scratchByteArrayLen {
+ if cap(dest) >= xlen {
+ out = dest[:xlen]
+ } else {
+ out = make([]byte, xlen)
+ }
+ copy(out, in)
+ return
+ }
+ }
+ return in
+}
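+
+// Editor's note: an illustrative sketch (assumption, not original) of why callers
+// detach. readx may return a view into the decoder's scratch buffer or into the
+// input slice, and later reads can overwrite or alias it:
+//
+//	view := d.r.readx(4)                            // may alias scratch or input bytes
+//	kept := detachZeroCopyBytes(d.bytes, nil, view) // safe copy for use outside codec control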
+
+// decInferLen will infer a sensible length, given the following:
+// - clen: length wanted.
+// - maxlen: max length to be returned.
+// if <= 0, it is unset, and we infer it based on the unit size
+// - unit: number of bytes for each element of the collection
+func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ // nothing to infer when clen is not set i.e. <= 0
+ if clen <= 0 {
+ return
+ }
+ if maxlen <= 0 {
+ // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items.
+ // maxlen = 256 * 1024 / unit
+ // if maxlen < (4 * 1024) {
+ // maxlen = 4 * 1024
+ // }
+ if unit < (256 / 4) {
+ maxlen = 256 * 1024 / unit
+ } else {
+ maxlen = 4 * 1024
+ }
+ }
+ if clen > maxlen {
+ rvlen = maxlen
+ truncated = true
+ } else {
+ rvlen = clen
+ }
+ return
+ // if clen <= 0 {
+ // rvlen = 0
+ // } else if maxlen > 0 && clen > maxlen {
+ // rvlen = maxlen
+ // truncated = true
+ // } else {
+ // rvlen = clen
+ // }
+ // return
+}
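+
+// Editor's note: a worked example (not part of the original source) of the
+// inference above, assuming 8-byte elements, so the derived maxlen is
+// 256*1024/8 = 32768:
+//
+//	decInferLen(10, 0, 8)    // => (10, false): clen fits under the derived maxlen
+//	decInferLen(1<<20, 0, 8) // => (32768, true): capped; caller fills, then appends
+//	decInferLen(100, 64, 8)  // => (64, true): an explicit maxlen wins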
+
+// // implement overall decReader wrapping both, for possible use inline:
+// type decReaderT struct {
+// bytes bool
+// rb *bytesDecReader
+// ri *ioDecReader
+// }
+//
+// // implement *Decoder as a decReader.
+// // Using decReaderT (defined just above) caused performance degradation
+// // possibly because of constant copying the value,
+// // and some value->interface conversion causing allocation.
+// func (d *Decoder) unreadn1() {
+// if d.bytes {
+// d.rb.unreadn1()
+// } else {
+// d.ri.unreadn1()
+// }
+// }
+// ... for other methods of decReader.
+// Testing showed that performance improvement was negligible.
diff --git a/vendor/github.com/ugorji/go/codec/decode_go.go b/vendor/github.com/ugorji/go/codec/decode_go.go
new file mode 100644
index 000000000..ba289cef6
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decode_go.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = true
+
+func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
+ rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem()
+ reflect.Copy(rvn2, rvn)
+ return
+}
diff --git a/vendor/github.com/ugorji/go/codec/decode_go14.go b/vendor/github.com/ugorji/go/codec/decode_go14.go
new file mode 100644
index 000000000..50063bc8f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decode_go14.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = false
+
+func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
+ panic("reflect.ArrayOf unsupported")
+}
diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go
new file mode 100644
index 000000000..c2cef812e
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/encode.go
@@ -0,0 +1,1461 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "sync"
+)
+
+const (
+ defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
+)
+
+// AsSymbolFlag defines what should be encoded as symbols.
+type AsSymbolFlag uint8
+
+const (
+ // AsSymbolDefault is default.
+ // Currently, this means only encode struct field names as symbols.
+ // The default is subject to change.
+ AsSymbolDefault AsSymbolFlag = iota
+
+ // AsSymbolAll means encode anything which could be a symbol as a symbol.
+ AsSymbolAll = 0xfe
+
+ // AsSymbolNone means do not encode anything as a symbol.
+ AsSymbolNone = 1 << iota
+
+ // AsSymbolMapStringKeysFlag means encode keys in map[string]XXX as symbols.
+ AsSymbolMapStringKeysFlag
+
+ // AsSymbolStructFieldNameFlag means encode struct field names as symbols.
+ AsSymbolStructFieldNameFlag
+)
+
+// encWriter abstracts writing to a byte array or to an io.Writer.
+type encWriter interface {
+ writeb([]byte)
+ writestr(string)
+ writen1(byte)
+ writen2(byte, byte)
+ atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+ IsBuiltinType(rt uintptr) bool
+ EncodeBuiltin(rt uintptr, v interface{})
+ EncodeNil()
+ EncodeInt(i int64)
+ EncodeUint(i uint64)
+ EncodeBool(b bool)
+ EncodeFloat32(f float32)
+ EncodeFloat64(f float64)
+ // encodeExtPreamble(xtag byte, length int)
+ EncodeRawExt(re *RawExt, e *Encoder)
+ EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
+ EncodeArrayStart(length int)
+ EncodeMapStart(length int)
+ EncodeString(c charEncoding, v string)
+ EncodeSymbol(v string)
+ EncodeStringBytes(c charEncoding, v []byte)
+ //TODO
+ //encBignum(f *big.Int)
+ //encStringRunes(c charEncoding, v []rune)
+
+ reset()
+}
+
+type encDriverAsis interface {
+ EncodeAsis(v []byte)
+}
+
+type encNoSeparator struct{}
+
+func (_ encNoSeparator) EncodeEnd() {}
+
+type ioEncWriterWriter interface {
+ WriteByte(c byte) error
+ WriteString(s string) (n int, err error)
+ Write(p []byte) (n int, err error)
+}
+
+type ioEncStringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// EncodeOptions captures configuration options during encode.
+type EncodeOptions struct {
+ // Encode a struct as an array, and not as a map
+ StructToArray bool
+
+ // Canonical representation means that encoding a value will always result in the same
+ // sequence of bytes.
+ //
+ // This only affects maps, as the iteration order for maps is random.
+ //
+ // The implementation MAY use the natural sort order for the map keys if possible:
+ //
+ //   - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
+ //     then the map keys are first sorted in natural order and then written
+ //     with their corresponding map values to the stream.
+ // - If there is no natural sort order, then the map keys will first be
+ // encoded into []byte, and then sorted,
+ // before writing the sorted keys and the corresponding map values to the stream.
+ //
+ Canonical bool
+
+ // CheckCircularRef controls whether we check for circular references
+ // and error fast during an encode.
+ //
+ // If enabled, an error is returned if a pointer to a struct
+ // references itself either directly or through one of its fields (iteratively).
+ //
+ // This is opt-in, as there may be a performance hit to checking circular references.
+ CheckCircularRef bool
+
+ // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+ // when checking if a value is empty.
+ //
+ // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+ RecursiveEmptyCheck bool
+
+ // Raw controls whether we encode Raw values.
+ // This is a "dangerous" option and must be explicitly set.
+ // If set, we blindly encode Raw values as-is, without checking
+ // if they are a correct representation of a value in that format.
+ // If unset, we error out.
+ Raw bool
+
+ // AsSymbols defines what should be encoded as symbols.
+ //
+ // Encoding as symbols can reduce the encoded size significantly.
+ //
+ // However, during encoding, each string to be encoded as a symbol must
+ // be checked to see if it has been seen before. Consequently, encoding time
+ // will increase when using symbols, because string comparisons have a real cost.
+ //
+ // Sample values:
+ // AsSymbolNone
+ // AsSymbolAll
+ //   AsSymbolMapStringKeysFlag
+ // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+ AsSymbols AsSymbolFlag
+}
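+
+// encodeOptionsSketch is an illustrative, editor-added sketch (not part of
+// the upstream file). It assumes, as the Encoder code below does when it
+// reads e.h.Canonical and e.h.StructToArray, that concrete handles such as
+// JsonHandle expose the EncodeOptions fields by embedding (via BasicHandle).
+func encodeOptionsSketch() {
+ var h JsonHandle
+ h.Canonical = true     // deterministic byte output for maps
+ h.StructToArray = true // encode structs as arrays rather than maps
+ var out []byte
+ _ = NewEncoderBytes(&out, &h)
+}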
+
+// ---------------------------------------------
+
+type simpleIoEncWriterWriter struct {
+ w io.Writer
+ bw io.ByteWriter
+ sw ioEncStringWriter
+ bs [1]byte
+}
+
+func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
+ if o.bw != nil {
+ return o.bw.WriteByte(c)
+ }
+ // _, err = o.w.Write([]byte{c})
+ o.bs[0] = c
+ _, err = o.w.Write(o.bs[:])
+ return
+}
+
+func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
+ if o.sw != nil {
+ return o.sw.WriteString(s)
+ }
+ // return o.w.Write([]byte(s))
+ return o.w.Write(bytesView(s))
+}
+
+func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
+ return o.w.Write(p)
+}
+
+// ----------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+ w ioEncWriterWriter
+ s simpleIoEncWriterWriter
+ // x [8]byte // temp byte array re-used internally for efficiency
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+ if len(bs) == 0 {
+ return
+ }
+ n, err := z.w.Write(bs)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(bs) {
+ panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n))
+ }
+}
+
+func (z *ioEncWriter) writestr(s string) {
+ n, err := z.w.WriteString(s)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(s) {
+ panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n))
+ }
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+ if err := z.w.WriteByte(b); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen2(b1 byte, b2 byte) {
+ z.writen1(b1)
+ z.writen1(b2)
+}
+
+func (z *ioEncWriter) atEndOfEncode() {}
+
+// ----------------------------------------
+
+// bytesEncWriter implements encWriter and can write to a byte slice.
+// It is used by the Marshal function.
+type bytesEncWriter struct {
+ b []byte
+ c int // cursor
+ out *[]byte // write out on atEndOfEncode
+}
+
+func (z *bytesEncWriter) writeb(s []byte) {
+ if len(s) == 0 {
+ return
+ }
+ oc, a := z.growNoAlloc(len(s))
+ if a {
+ z.growAlloc(len(s), oc)
+ }
+ copy(z.b[oc:], s)
+}
+
+func (z *bytesEncWriter) writestr(s string) {
+ if len(s) == 0 {
+ return
+ }
+ oc, a := z.growNoAlloc(len(s))
+ if a {
+ z.growAlloc(len(s), oc)
+ }
+ copy(z.b[oc:], s)
+}
+
+func (z *bytesEncWriter) writen1(b1 byte) {
+ oc, a := z.growNoAlloc(1)
+ if a {
+ z.growAlloc(1, oc)
+ }
+ z.b[oc] = b1
+}
+
+func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
+ oc, a := z.growNoAlloc(2)
+ if a {
+ z.growAlloc(2, oc)
+ }
+ z.b[oc+1] = b2
+ z.b[oc] = b1
+}
+
+func (z *bytesEncWriter) atEndOfEncode() {
+ *(z.out) = z.b[:z.c]
+}
+
+// growNoAlloc(n int) is kept small so it can be inlined.
+// If allocation is needed, the caller then calls growAlloc(n int).
+
+func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) {
+ oldcursor = z.c
+ z.c = z.c + n
+ if z.c > len(z.b) {
+ if z.c > cap(z.b) {
+ allocNeeded = true
+ } else {
+ z.b = z.b[:cap(z.b)]
+ }
+ }
+ return
+}
+
+func (z *bytesEncWriter) growAlloc(n int, oldcursor int) {
+ // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
+ // bytes.Buffer model (2*cap + n): much better
+ // bs := make([]byte, 2*cap(z.b)+n)
+ bs := make([]byte, growCap(cap(z.b), 1, n))
+ copy(bs, z.b[:oldcursor])
+ z.b = bs
+}
+
+// ---------------------------------------------
+
+type encFnInfo struct {
+ e *Encoder
+ ti *typeInfo
+ xfFn Ext
+ xfTag uint64
+ seq seqType
+}
+
+func (f *encFnInfo) builtin(rv reflect.Value) {
+ f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface())
+}
+
+func (f *encFnInfo) raw(rv reflect.Value) {
+ f.e.raw(rv.Interface().(Raw))
+}
+
+func (f *encFnInfo) rawExt(rv reflect.Value) {
+ // rev := rv.Interface().(RawExt)
+ // f.e.e.EncodeRawExt(&rev, f.e)
+ var re *RawExt
+ if rv.CanAddr() {
+ re = rv.Addr().Interface().(*RawExt)
+ } else {
+ rev := rv.Interface().(RawExt)
+ re = &rev
+ }
+ f.e.e.EncodeRawExt(re, f.e)
+}
+
+func (f *encFnInfo) ext(rv reflect.Value) {
+ // if this is a struct|array and it was addressable, then pass the address directly (not the value)
+ if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() {
+ rv = rv.Addr()
+ }
+ f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e)
+}
+
+func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) {
+ if indir == 0 {
+ v = rv.Interface()
+ } else if indir == -1 {
+ // If a non-pointer was passed to Encode(), then that value is not addressable.
+ // Take addr if addressable, else copy value to an addressable value.
+ if rv.CanAddr() {
+ v = rv.Addr().Interface()
+ } else {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v)
+ }
+ } else {
+ for j := int8(0); j < indir; j++ {
+ if rv.IsNil() {
+ f.e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ }
+ v = rv.Interface()
+ }
+ return v, true
+}
+
+func (f *encFnInfo) selferMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed {
+ v.(Selfer).CodecEncodeSelf(f.e)
+ }
+}
+
+func (f *encFnInfo) binaryMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed {
+ bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+ }
+}
+
+func (f *encFnInfo) textMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed {
+ // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface())
+ bs, fnerr := v.(encoding.TextMarshaler).MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+ }
+}
+
+func (f *encFnInfo) jsonMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed {
+ bs, fnerr := v.(jsonMarshaler).MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+ }
+}
+
+func (f *encFnInfo) kBool(rv reflect.Value) {
+ f.e.e.EncodeBool(rv.Bool())
+}
+
+func (f *encFnInfo) kString(rv reflect.Value) {
+ f.e.e.EncodeString(c_UTF8, rv.String())
+}
+
+func (f *encFnInfo) kFloat64(rv reflect.Value) {
+ f.e.e.EncodeFloat64(rv.Float())
+}
+
+func (f *encFnInfo) kFloat32(rv reflect.Value) {
+ f.e.e.EncodeFloat32(float32(rv.Float()))
+}
+
+func (f *encFnInfo) kInt(rv reflect.Value) {
+ f.e.e.EncodeInt(rv.Int())
+}
+
+func (f *encFnInfo) kUint(rv reflect.Value) {
+ f.e.e.EncodeUint(rv.Uint())
+}
+
+func (f *encFnInfo) kInvalid(rv reflect.Value) {
+ f.e.e.EncodeNil()
+}
+
+func (f *encFnInfo) kErr(rv reflect.Value) {
+ f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
+}
+
+func (f *encFnInfo) kSlice(rv reflect.Value) {
+ ti := f.ti
+ // array may be non-addressable, so we have to manage with care
+ // (don't call rv.Bytes, rv.Slice, etc).
+ // E.g. type S struct { B [2]byte };
+ // Encode(S{}) will bomb on "panic: slice of unaddressable array".
+ e := f.e
+ if f.seq != seqTypeArray {
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ // If in this method, then there was no extension function defined.
+ // So it's okay to treat as []byte.
+ if ti.rtid == uint8SliceTypId {
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ return
+ }
+ }
+ cr := e.cr
+ rtelem := ti.rt.Elem()
+ l := rv.Len()
+ if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 {
+ switch f.seq {
+ case seqTypeArray:
+ // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else
+ if rv.CanAddr() {
+ e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes())
+ } else {
+ var bs []byte
+ if l <= cap(e.b) {
+ bs = e.b[:l]
+ } else {
+ bs = make([]byte, l)
+ }
+ reflect.Copy(reflect.ValueOf(bs), rv)
+ // TODO: Test that reflect.Copy works instead of manual one-by-one
+ // for i := 0; i < l; i++ {
+ // bs[i] = byte(rv.Index(i).Uint())
+ // }
+ e.e.EncodeStringBytes(c_RAW, bs)
+ }
+ case seqTypeSlice:
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ case seqTypeChan:
+ bs := e.b[:0]
+ // do not use range, so that the number of elements encoded
+ // does not change, and encoding does not hang waiting on someone to close chan.
+ // for b := range rv.Interface().(<-chan byte) {
+ // bs = append(bs, b)
+ // }
+ ch := rv.Interface().(<-chan byte)
+ for i := 0; i < l; i++ {
+ bs = append(bs, <-ch)
+ }
+ e.e.EncodeStringBytes(c_RAW, bs)
+ }
+ return
+ }
+
+ if ti.mbs {
+ if l%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", l)
+ return
+ }
+ e.e.EncodeMapStart(l / 2)
+ } else {
+ e.e.EncodeArrayStart(l)
+ }
+
+ if l > 0 {
+ for rtelem.Kind() == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ }
+ // if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ var fn *encFn
+ if rtelem.Kind() != reflect.Interface {
+ rtelemid := reflect.ValueOf(rtelem).Pointer()
+ fn = e.getEncFn(rtelemid, rtelem, true, true)
+ }
+ // TODO: Consider perf implication of encoding odd index values as symbols if type is string
+ for j := 0; j < l; j++ {
+ if cr != nil {
+ if ti.mbs {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ } else {
+ cr.sendContainerState(containerArrayElem)
+ }
+ }
+ if f.seq == seqTypeChan {
+ if rv2, ok2 := rv.Recv(); ok2 {
+ e.encodeValue(rv2, fn)
+ } else {
+ e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received.
+ }
+ } else {
+ e.encodeValue(rv.Index(j), fn)
+ }
+ }
+ }
+
+ if cr != nil {
+ if ti.mbs {
+ cr.sendContainerState(containerMapEnd)
+ } else {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ }
+}
+
+func (f *encFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ e := f.e
+ cr := e.cr
+ tisfi := fti.sfip
+ toMap := !(fti.toArray || e.h.StructToArray)
+ newlen := len(fti.sfi)
+
+ // Use sync.Pool to reduce allocating slices unnecessarily.
+ // The cost of sync.Pool is less than the cost of new allocation.
+ pool, poolv, fkvs := encStructPoolGet(newlen)
+
+ // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
+ if toMap {
+ tisfi = fti.sfi
+ }
+ newlen = 0
+ var kv stringRv
+ recur := e.h.RecursiveEmptyCheck
+ for _, si := range tisfi {
+ kv.r = si.field(rv, false)
+ if toMap {
+ if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
+ continue
+ }
+ kv.v = si.encName
+ } else {
+ // use the zero value.
+ // if a reference or struct, set to nil (so you do not output too much)
+ if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
+ switch kv.r.Kind() {
+ case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
+ kv.r = reflect.Value{} //encode as nil
+ }
+ }
+ }
+ fkvs[newlen] = kv
+ newlen++
+ }
+
+ // debugf(">>>> kStruct: newlen: %v", newlen)
+ // sep := !e.be
+ ee := e.e //don't dereference every time
+
+ if toMap {
+ ee.EncodeMapStart(newlen)
+ // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(kv.v)
+ } else {
+ ee.EncodeString(c_UTF8, kv.v)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(kv.r, nil)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ } else {
+ ee.EncodeArrayStart(newlen)
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encodeValue(kv.r, nil)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ }
+
+ // do not use defer. Instead, use explicit pool return at end of function.
+ // defer has a cost we are trying to avoid.
+ // If there is a panic and these slices are not returned, it is ok.
+ if pool != nil {
+ pool.Put(poolv)
+ }
+}
+
+// func (f *encFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// f.e.e.encodeNil()
+// return
+// }
+// f.e.encodeValue(rv.Elem())
+// }
+
+// func (f *encFnInfo) kInterface(rv reflect.Value) {
+// println("kInterface called")
+// debug.PrintStack()
+// if rv.IsNil() {
+// f.e.e.EncodeNil()
+// return
+// }
+// f.e.encodeValue(rv.Elem(), nil)
+// }
+
+func (f *encFnInfo) kMap(rv reflect.Value) {
+ ee := f.e.e
+ if rv.IsNil() {
+ ee.EncodeNil()
+ return
+ }
+
+ l := rv.Len()
+ ee.EncodeMapStart(l)
+ e := f.e
+ cr := e.cr
+ if l == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+ var asSymbols bool
+ // determine the underlying key and val encFn's for the map.
+ // This eliminates some work which is done for each loop iteration i.e.
+ // rv.Type(), reflect.ValueOf(rt).Pointer(), then check map/list for fn.
+ //
+ // However, if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ var keyFn, valFn *encFn
+ ti := f.ti
+ rtkey := ti.rt.Key()
+ rtval := ti.rt.Elem()
+ rtkeyid := reflect.ValueOf(rtkey).Pointer()
+ // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String
+ var keyTypeIsString = rtkeyid == stringTypId
+ if keyTypeIsString {
+ asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ } else {
+ for rtkey.Kind() == reflect.Ptr {
+ rtkey = rtkey.Elem()
+ }
+ if rtkey.Kind() != reflect.Interface {
+ rtkeyid = reflect.ValueOf(rtkey).Pointer()
+ keyFn = e.getEncFn(rtkeyid, rtkey, true, true)
+ }
+ }
+ for rtval.Kind() == reflect.Ptr {
+ rtval = rtval.Elem()
+ }
+ if rtval.Kind() != reflect.Interface {
+ rtvalid := reflect.ValueOf(rtval).Pointer()
+ valFn = e.getEncFn(rtvalid, rtval, true, true)
+ }
+ mks := rv.MapKeys()
+ // for j, lmks := 0, len(mks); j < lmks; j++ {
+
+ if e.h.Canonical {
+ e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols)
+ } else {
+ for j := range mks {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if keyTypeIsString {
+ if asSymbols {
+ ee.EncodeSymbol(mks[j].String())
+ } else {
+ ee.EncodeString(c_UTF8, mks[j].String())
+ }
+ } else {
+ e.encodeValue(mks[j], keyFn)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mks[j]), valFn)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) {
+ ee := e.e
+ cr := e.cr
+ // we previously did out-of-band if an extension was registered.
+ // This is not necessary, as the natural kind is sufficient for ordering.
+
+ if rtkeyid == uint8SliceTypId {
+ mksv := make([]bytesRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bytes()
+ }
+ sort.Sort(bytesRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeStringBytes(c_RAW, mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ } else {
+ switch rtkey.Kind() {
+ case reflect.Bool:
+ mksv := make([]boolRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bool()
+ }
+ sort.Sort(boolRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.String:
+ mksv := make([]stringRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.String()
+ }
+ sort.Sort(stringRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(mksv[i].v)
+ } else {
+ ee.EncodeString(c_UTF8, mksv[i].v)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
+ mksv := make([]uintRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Uint()
+ }
+ sort.Sort(uintRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ mksv := make([]intRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Int()
+ }
+ sort.Sort(intRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Float32:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(mksv[i].v))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Float64:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ default:
+ // out-of-band
+ // encode each key to a []byte first, then sort them, then write out keys and values
+ var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ mksbv := make([]bytesRv, len(mks))
+ for i, k := range mks {
+ v := &mksbv[i]
+ l := len(mksv)
+ e2.MustEncode(k)
+ v.r = k
+ v.v = mksv[l:]
+ // fmt.Printf(">>>>> %s\n", mksv[l:])
+ }
+ sort.Sort(bytesRvSlice(mksbv))
+ for j := range mksbv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(mksbv[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksbv[j].r), valFn)
+ }
+ }
+ }
+}
+
+// --------------------------------------------------
+
+// encFn encapsulates the captured variables and the encode function.
+// This way, we only do some calculations once, and pass to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
+type encFn struct {
+ i encFnInfo
+ f func(*encFnInfo, reflect.Value)
+}
+
+// --------------------------------------------------
+
+type encRtidFn struct {
+ rtid uintptr
+ fn encFn
+}
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+ // hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+ e encDriver
+ // NOTE: Encoder shouldn't call its write methods,
+ // as the handler MAY need to do some coordination.
+ w encWriter
+ s []encRtidFn
+ ci set
+ be bool // is binary encoding
+ js bool // is json handle
+
+ wi ioEncWriter
+ wb bytesEncWriter
+
+ h *BasicHandle
+ hh Handle
+
+ cr containerStateRecv
+ as encDriverAsis
+
+ f map[uintptr]*encFn
+ b [scratchByteArrayLen]byte
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.Reset(w)
+ return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.ResetBytes(out)
+ return e
+}
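+
+// newEncoderBytesSketch is an illustrative, editor-added usage sketch (not
+// part of the upstream file); JsonHandle is the handle type this package
+// already references in newEncoder below, and any concrete Handle would do.
+func newEncoderBytesSketch() ([]byte, error) {
+ var out []byte
+ enc := NewEncoderBytes(&out, new(JsonHandle))
+ if err := enc.Encode(map[string]int{"a": 1}); err != nil {
+ return nil, err
+ }
+ return out, nil // out was updated by atEndOfEncode
+}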
+
+func newEncoder(h Handle) *Encoder {
+ e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ _, e.js = h.(*JsonHandle)
+ e.e = h.newEncDriver(e)
+ e.as, _ = e.e.(encDriverAsis)
+ e.cr, _ = e.e.(containerStateRecv)
+ return e
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+ ww, ok := w.(ioEncWriterWriter)
+ if ok {
+ e.wi.w = ww
+ } else {
+ sww := &e.wi.s
+ sww.w = w
+ sww.bw, _ = w.(io.ByteWriter)
+ sww.sw, _ = w.(ioEncStringWriter)
+ e.wi.w = sww
+ //ww = bufio.NewWriterSize(w, defEncByteBufSize)
+ }
+ e.w = &e.wi
+ e.e.reset()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+ in := *out
+ if in == nil {
+ in = make([]byte, defEncByteBufSize)
+ }
+ e.wb.b, e.wb.out, e.wb.c = in, out, 0
+ e.w = &e.wb
+ e.e.reset()
+}
+
+// func (e *Encoder) sendContainerState(c containerState) {
+// if e.cr != nil {
+// e.cr.sendContainerState(c)
+// }
+// }
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The "codec" key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+// Note that the "json" key is used in the absence of the "codec" key.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's tag is "-", OR
+//   - the field is empty (one of the empty values listed further below) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the tag on the _struct field sets the "toarray" option
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+// - the struct tag specifies a replacement name (first value)
+// - the field is of an interface type
+//
+// Examples:
+//
+//      // NOTE: 'json:' can be used as struct tag key, in place of 'codec:' below.
+// type MyStruct struct {
+// _struct bool `codec:",omitempty"` //set omitempty for every field
+// Field1 string `codec:"-"` //skip this field
+// Field2 int `codec:"myName"` //Use key "myName" in encode stream
+// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
+// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+// io.Reader //use key "Reader".
+//      MyStruct `codec:"my1"` //use key "my1".
+// MyStruct //inline it
+// ...
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field
+// //and encode struct as an array
+// }
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+// - If a Selfer, call its CodecEncodeSelf method
+// - If an extension is registered for it, call that extension function
+// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
+// - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ e.encode(v)
+ e.w.atEndOfEncode()
+ return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight to the code location that triggered the error.
+func (e *Encoder) MustEncode(v interface{}) {
+ e.encode(v)
+ e.w.atEndOfEncode()
+}
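+
+// encodeVsMustEncodeSketch is an illustrative, editor-added sketch (not part
+// of the upstream file): Encode converts internal panics into an error via
+// panicToErr, while MustEncode (which has no such defer) lets the panic
+// propagate so the triggering call site is visible.
+func encodeVsMustEncodeSketch(e *Encoder, v interface{}) {
+ if err := e.Encode(v); err != nil { // failure surfaces as an error
+ return
+ }
+ e.MustEncode(v) // failure surfaces as a panic
+}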
+
+func (e *Encoder) encode(iv interface{}) {
+ // if ics, ok := iv.(Selfer); ok {
+ // ics.CodecEncodeSelf(e)
+ // return
+ // }
+
+ switch v := iv.(type) {
+ case nil:
+ e.e.EncodeNil()
+ case Selfer:
+ v.CodecEncodeSelf(e)
+ case Raw:
+ e.raw(v)
+ case reflect.Value:
+ e.encodeValue(v, nil)
+
+ case string:
+ e.e.EncodeString(c_UTF8, v)
+ case bool:
+ e.e.EncodeBool(v)
+ case int:
+ e.e.EncodeInt(int64(v))
+ case int8:
+ e.e.EncodeInt(int64(v))
+ case int16:
+ e.e.EncodeInt(int64(v))
+ case int32:
+ e.e.EncodeInt(int64(v))
+ case int64:
+ e.e.EncodeInt(v)
+ case uint:
+ e.e.EncodeUint(uint64(v))
+ case uint8:
+ e.e.EncodeUint(uint64(v))
+ case uint16:
+ e.e.EncodeUint(uint64(v))
+ case uint32:
+ e.e.EncodeUint(uint64(v))
+ case uint64:
+ e.e.EncodeUint(v)
+ case float32:
+ e.e.EncodeFloat32(v)
+ case float64:
+ e.e.EncodeFloat64(v)
+
+ case []uint8:
+ e.e.EncodeStringBytes(c_RAW, v)
+
+ case *string:
+ e.e.EncodeString(c_UTF8, *v)
+ case *bool:
+ e.e.EncodeBool(*v)
+ case *int:
+ e.e.EncodeInt(int64(*v))
+ case *int8:
+ e.e.EncodeInt(int64(*v))
+ case *int16:
+ e.e.EncodeInt(int64(*v))
+ case *int32:
+ e.e.EncodeInt(int64(*v))
+ case *int64:
+ e.e.EncodeInt(*v)
+ case *uint:
+ e.e.EncodeUint(uint64(*v))
+ case *uint8:
+ e.e.EncodeUint(uint64(*v))
+ case *uint16:
+ e.e.EncodeUint(uint64(*v))
+ case *uint32:
+ e.e.EncodeUint(uint64(*v))
+ case *uint64:
+ e.e.EncodeUint(*v)
+ case *float32:
+ e.e.EncodeFloat32(*v)
+ case *float64:
+ e.e.EncodeFloat64(*v)
+
+ case *[]uint8:
+ e.e.EncodeStringBytes(c_RAW, *v)
+
+ default:
+ const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer
+ if !fastpathEncodeTypeSwitch(iv, e) {
+ e.encodeI(iv, false, checkCodecSelfer1)
+ }
+ }
+}
+
+func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) {
+ // use a goto statement instead of a recursive function for ptr/interface.
+TOP:
+ switch rv.Kind() {
+ case reflect.Ptr:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ if e.h.CheckCircularRef && rv.Kind() == reflect.Struct {
+ // TODO: Movable pointers will be an issue here. Future problem.
+ sptr = rv.UnsafeAddr()
+ break TOP
+ }
+ goto TOP
+ case reflect.Interface:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ goto TOP
+ case reflect.Slice, reflect.Map:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ case reflect.Invalid, reflect.Func:
+ e.e.EncodeNil()
+ return
+ }
+
+ proceed = true
+ rv2 = rv
+ return
+}
+
+func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr,
+ checkFastpath, checkCodecSelfer bool) {
+ if sptr != 0 {
+ if (&e.ci).add(sptr) {
+ e.errorf("circular reference found: # %d", sptr)
+ }
+ }
+ if fn == nil {
+ rt := rv.Type()
+ rtid := reflect.ValueOf(rt).Pointer()
+ // fn = e.getEncFn(rtid, rt, true, true)
+ fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer)
+ }
+ fn.f(&fn.i, rv)
+ if sptr != 0 {
+ (&e.ci).remove(sptr)
+ }
+}
+
+func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) {
+ if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed {
+ e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer)
+ }
+}
+
+func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) {
+ // if a valid fn is passed, it MUST BE for the dereferenced type of rv
+ if rv, sptr, proceed := e.preEncodeValue(rv); proceed {
+ e.doEncodeValue(rv, fn, sptr, true, true)
+ }
+}
+
+func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) {
+ // rtid := reflect.ValueOf(rt).Pointer()
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = e.f[rtid]
+ } else {
+ for i := range e.s {
+ v := &(e.s[i])
+ if v.rtid == rtid {
+ fn, ok = &(v.fn), true
+ break
+ }
+ }
+ }
+ if ok {
+ return
+ }
+
+ if useMapForCodecCache {
+ if e.f == nil {
+ e.f = make(map[uintptr]*encFn, initCollectionCap)
+ }
+ fn = new(encFn)
+ e.f[rtid] = fn
+ } else {
+ if e.s == nil {
+ e.s = make([]encRtidFn, 0, initCollectionCap)
+ }
+ e.s = append(e.s, encRtidFn{rtid: rtid})
+ fn = &(e.s[len(e.s)-1]).fn
+ }
+
+ ti := e.h.getTypeInfo(rtid, rt)
+ fi := &(fn.i)
+ fi.e = e
+ fi.ti = ti
+
+ if checkCodecSelfer && ti.cs {
+ fn.f = (*encFnInfo).selferMarshal
+ } else if rtid == rawTypId {
+ fn.f = (*encFnInfo).raw
+ } else if rtid == rawExtTypId {
+ fn.f = (*encFnInfo).rawExt
+ } else if e.e.IsBuiltinType(rtid) {
+ fn.f = (*encFnInfo).builtin
+ } else if xfFn := e.h.getExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+ fn.f = (*encFnInfo).ext
+ } else if supportMarshalInterfaces && e.be && ti.bm {
+ fn.f = (*encFnInfo).binaryMarshal
+ } else if supportMarshalInterfaces && !e.be && e.js && ti.jm {
+ // If JSON, we should check JSONMarshal before textMarshal
+ fn.f = (*encFnInfo).jsonMarshal
+ } else if supportMarshalInterfaces && !e.be && ti.tm {
+ fn.f = (*encFnInfo).textMarshal
+ } else {
+ rk := rt.Kind()
+ if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+ if rt.PkgPath() == "" { // un-named slice or map
+ if idx := fastpathAV.index(rtid); idx != -1 {
+ fn.f = fastpathAV[idx].encfn
+ }
+ } else {
+ ok = false
+ // use mapping for underlying type if there
+ var rtu reflect.Type
+ if rk == reflect.Map {
+ rtu = reflect.MapOf(rt.Key(), rt.Elem())
+ } else {
+ rtu = reflect.SliceOf(rt.Elem())
+ }
+ rtuid := reflect.ValueOf(rtu).Pointer()
+ if idx := fastpathAV.index(rtuid); idx != -1 {
+ xfnf := fastpathAV[idx].encfn
+ xrt := fastpathAV[idx].rt
+ fn.f = func(xf *encFnInfo, xrv reflect.Value) {
+ xfnf(xf, xrv.Convert(xrt))
+ }
+ }
+ }
+ }
+ if fn.f == nil {
+ switch rk {
+ case reflect.Bool:
+ fn.f = (*encFnInfo).kBool
+ case reflect.String:
+ fn.f = (*encFnInfo).kString
+ case reflect.Float64:
+ fn.f = (*encFnInfo).kFloat64
+ case reflect.Float32:
+ fn.f = (*encFnInfo).kFloat32
+ case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
+ fn.f = (*encFnInfo).kInt
+ case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr:
+ fn.f = (*encFnInfo).kUint
+ case reflect.Invalid:
+ fn.f = (*encFnInfo).kInvalid
+ case reflect.Chan:
+ fi.seq = seqTypeChan
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Slice:
+ fi.seq = seqTypeSlice
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Array:
+ fi.seq = seqTypeArray
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Struct:
+ fn.f = (*encFnInfo).kStruct
+ // reflect.Ptr and reflect.Interface are handled already by preEncodeValue
+ // case reflect.Ptr:
+ // fn.f = (*encFnInfo).kPtr
+ // case reflect.Interface:
+ // fn.f = (*encFnInfo).kInterface
+ case reflect.Map:
+ fn.f = (*encFnInfo).kMap
+ default:
+ fn.f = (*encFnInfo).kErr
+ }
+ }
+ }
+
+ return
+}
+
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+ if fnerr != nil {
+ panic(fnerr)
+ }
+ if bs == nil {
+ e.e.EncodeNil()
+ } else if asis {
+ e.asis(bs)
+ } else {
+ e.e.EncodeStringBytes(c, bs)
+ }
+}
+
+func (e *Encoder) asis(v []byte) {
+ if e.as == nil {
+ e.w.writeb(v)
+ } else {
+ e.as.EncodeAsis(v)
+ }
+}
+
+func (e *Encoder) raw(vv Raw) {
+ v := []byte(vv)
+ if !e.h.Raw {
+ e.errorf("Raw values cannot be encoded: %v", v)
+ }
+ if e.as == nil {
+ e.w.writeb(v)
+ } else {
+ e.as.EncodeAsis(v)
+ }
+}
+
+func (e *Encoder) errorf(format string, params ...interface{}) {
+ err := fmt.Errorf(format, params...)
+ panic(err)
+}
+
+// ----------------------------------------
+
+const encStructPoolLen = 5
+
+// encStructPool is an array of sync.Pool.
+// Each element of the array pools one of encStructPool(8|16|32|64|128).
+// It allows the re-use of slices up to 128 in length.
+// One performance cost of encoding structs was collecting
+// the values that were empty and should be omitted.
+// We needed slices of reflect.Value and string to collect them.
+// This shared pool reduces the amount of unnecessary creation we do.
+// The cost is that of locking sometimes, but sync.Pool is efficient
+// enough to reduce thread contention.
+var encStructPool [encStructPoolLen]sync.Pool
+
+func init() {
+ encStructPool[0].New = func() interface{} { return new([8]stringRv) }
+ encStructPool[1].New = func() interface{} { return new([16]stringRv) }
+ encStructPool[2].New = func() interface{} { return new([32]stringRv) }
+ encStructPool[3].New = func() interface{} { return new([64]stringRv) }
+ encStructPool[4].New = func() interface{} { return new([128]stringRv) }
+}
+
+func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) {
+ // if encStructPoolLen != 5 { // constant check, so removed at build time.
+ //   panic(errors.New("encStructPoolLen must be equal to 5")) // defensive, in case it is changed
+ // }
+ // idxpool := newlen / 8
+ if newlen <= 8 {
+ p = &encStructPool[0]
+ v = p.Get()
+ s = v.(*[8]stringRv)[:newlen]
+ } else if newlen <= 16 {
+ p = &encStructPool[1]
+ v = p.Get()
+ s = v.(*[16]stringRv)[:newlen]
+ } else if newlen <= 32 {
+ p = &encStructPool[2]
+ v = p.Get()
+ s = v.(*[32]stringRv)[:newlen]
+ } else if newlen <= 64 {
+ p = &encStructPool[3]
+ v = p.Get()
+ s = v.(*[64]stringRv)[:newlen]
+ } else if newlen <= 128 {
+ p = &encStructPool[4]
+ v = p.Get()
+ s = v.(*[128]stringRv)[:newlen]
+ } else {
+ s = make([]stringRv, newlen)
+ }
+ return
+}
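+
+// encStructPoolSketch is an illustrative, editor-added sketch (not part of
+// the upstream file): it mirrors how kStruct above pairs encStructPoolGet
+// with an explicit Put (deliberately not deferred, to avoid defer overhead
+// on the hot path).
+func encStructPoolSketch(n int) {
+ pool, poolv, fkvs := encStructPoolGet(n)
+ _ = fkvs // scratch space for up to n (encName, reflect.Value) pairs
+ if pool != nil { // pool is nil when n > 128 and a plain make was used
+ pool.Put(poolv)
+ }
+}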
+
+// ----------------------------------------
+
+// func encErr(format string, params ...interface{}) {
+// doPanic(msgTagEnc, format, params...)
+// }
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 000000000..f2e5d2dcf
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,39352 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
+// ************************************************************
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slice of all builtin types,
+// - map of all builtin types to string or interface value
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast path decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+//   // decoding into p2 will bomb if fast path functions do not treat it as unaddressable.
+//
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathEnabled = true
+
+const fastpathCheckNilFalse = false // for reflect
+const fastpathCheckNilTrue = true // for type switch
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+
+type fastpathA [271]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ h, i, j := 0, 0, 271 // len(x)
+ for i < j {
+ h = i + (j-i)/2
+ if x[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < 271 && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}
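+
+// indexViaSortSearch is an illustrative, editor-added sketch (not part of
+// the upstream generated file): index above is the same binary search that
+// the standard library's sort.Search performs.
+func (x *fastpathA) indexViaSortSearch(rtid uintptr) int {
+ i := sort.Search(len(x), func(h int) bool { return x[h].rtid >= rtid })
+ if i < len(x) && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}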
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// to avoid a possible initialization loop error, populate fastpathAV in an init()
+func init() {
+ i := 0
+ fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
+ xrt := reflect.TypeOf(v)
+ xptr := reflect.ValueOf(xrt).Pointer()
+ fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+ i++
+ return
+ }
+
+ fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR)
+ fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR)
+ fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R)
+ fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R)
+ fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR)
+ fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R)
+ fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R)
+ fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R)
+ fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR)
+ fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR)
+ fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R)
+ fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R)
+ fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R)
+ fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R)
+ fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR)
+
+ fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR)
+ fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR)
+ fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR)
+ fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R)
+ fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R)
+ fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R)
+ fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R)
+ fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR)
+ fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR)
+ fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R)
+ fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R)
+ fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R)
+ fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R)
+ fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R)
+ fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R)
+ fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR)
+ fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR)
+ fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR)
+ fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR)
+ fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R)
+ fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R)
+ fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R)
+ fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R)
+ fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR)
+ fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR)
+ fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R)
+ fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R)
+ fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R)
+ fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R)
+ fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R)
+ fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R)
+ fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR)
+ fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR)
+ fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR)
+ fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR)
+ fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R)
+ fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R)
+ fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R)
+ fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, (*decFnInfo).fastpathDecMapFloat32Uint64R)
+ fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR)
+ fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR)
+ fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R)
+ fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R)
+ fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R)
+ fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R)
+ fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R)
+ fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R)
+ fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR)
+ fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR)
+ fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR)
+ fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR)
+ fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R)
+ fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R)
+ fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R)
+ fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R)
+ fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR)
+ fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR)
+ fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R)
+ fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R)
+ fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R)
+ fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R)
+ fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R)
+ fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R)
+ fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR)
+ fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR)
+ fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR)
+ fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR)
+ fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R)
+ fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R)
+ fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R)
+ fn(map[uint]uint64(nil), (*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R)
+ fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR)
+ fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR)
+ fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R)
+ fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R)
+ fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R)
+ fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R)
+ fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R)
+ fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R)
+ fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR)
+ fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR)
+ fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR)
+ fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR)
+ fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R)
+ fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R)
+ fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R)
+ fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R)
+ fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR)
+ fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR)
+ fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R)
+ fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R)
+ fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R)
+ fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R)
+ fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R)
+ fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R)
+ fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR)
+ fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR)
+ fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR)
+ fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR)
+ fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R)
+ fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R)
+ fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R)
+ fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R)
+ fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR)
+ fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR)
+ fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R)
+ fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R)
+ fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R)
+ fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R)
+ fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R)
+ fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R)
+ fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR)
+ fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR)
+ fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR)
+ fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR)
+ fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R)
+ fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R)
+ fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R)
+ fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R)
+ fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR)
+ fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR)
+ fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R)
+ fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R)
+ fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R)
+ fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R)
+ fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R)
+ fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R)
+ fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR)
+ fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR)
+ fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR)
+ fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR)
+ fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R)
+ fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R)
+ fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R)
+ fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, (*decFnInfo).fastpathDecMapUint64Uint64R)
+ fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR)
+ fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR)
+ fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R)
+ fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R)
+ fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R)
+ fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R)
+ fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R)
+ fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R)
+ fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR)
+ fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR)
+ fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR)
+ fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR)
+ fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R)
+ fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R)
+ fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R)
+ fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R)
+ fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR)
+ fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR)
+ fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R)
+ fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R)
+ fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R)
+ fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R)
+ fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R)
+ fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R)
+ fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR)
+ fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR)
+ fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR)
+ fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR)
+ fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R)
+ fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R)
+ fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R)
+ fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, (*decFnInfo).fastpathDecMapIntUint64R)
+ fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR)
+ fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR)
+ fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R)
+ fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R)
+ fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R)
+ fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R)
+ fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R)
+ fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R)
+ fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR)
+ fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR)
+ fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR)
+ fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR)
+ fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R)
+ fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R)
+ fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R)
+ fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R)
+ fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR)
+ fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR)
+ fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R)
+ fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R)
+ fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R)
+ fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R)
+ fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R)
+ fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R)
+ fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR)
+ fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR)
+ fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR)
+ fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR)
+ fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R)
+ fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R)
+ fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R)
+ fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R)
+ fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR)
+ fn(map[int16]int(nil), (*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR)
+ fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R)
+ fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R)
+ fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R)
+ fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R)
+ fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R)
+ fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R)
+ fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR)
+ fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR)
+ fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR)
+ fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR)
+ fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R)
+ fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R)
+ fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R)
+ fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R)
+ fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR)
+ fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR)
+ fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R)
+ fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R)
+ fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R)
+ fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R)
+ fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R)
+ fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R)
+ fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR)
+ fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR)
+ fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR)
+ fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR)
+ fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R)
+ fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R)
+ fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R)
+ fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R)
+ fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR)
+ fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR)
+ fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R)
+ fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R)
+ fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R)
+ fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R)
+ fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R)
+ fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R)
+ fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR)
+ fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR)
+ fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR)
+ fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR)
+ fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R)
+ fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R)
+ fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R)
+ fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R)
+ fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR)
+ fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR)
+ fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R)
+ fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R)
+ fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R)
+ fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R)
+ fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R)
+ fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R)
+ fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR)
+
+ sort.Sort(fastpathAslice(fastpathAV[:]))
+}
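+
+// Note: the fn(...) registrations above populate fastpathAV with one entry
+// per supported []T and map[K]V combination, pairing each concrete type
+// with its specialized encode and decode functions; sorting the table
+// afterwards is what allows fast-path lookups to binary-search it at
+// runtime rather than walk it linearly.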
+
+// -- encode
+
+// -- -- fast path type switch
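+// fastpathEncodeTypeSwitch switches on the concrete type of iv, covering
+// both the value and pointer forms of every generated slice and map type,
+// and calls the matching specialized encoder directly instead of going
+// through reflection. Its boolean result appears to report whether a
+// fast-path case handled the value, letting the caller fall back to the
+// reflection-based encoder otherwise.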
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+ case *[]interface{}:
+ fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []string:
+ fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+ case *[]string:
+ fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
+ case *map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
+ case *map[string]string:
+ fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint:
+ fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
+ case *map[string]uint:
+ fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint8:
+ fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint16:
+ fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint16:
+ fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint32:
+ fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint32:
+ fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint64:
+ fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
+ case *map[string]int:
+ fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int8:
+ fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
+ case *map[string]int8:
+ fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int16:
+ fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
+ case *map[string]int16:
+ fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
+ case *map[string]int32:
+ fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int64:
+ fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
+ case *map[string]int64:
+ fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float32:
+ fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[string]float32:
+ fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[string]float64:
+ fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
+ case *map[string]bool:
+ fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+ case *[]float32:
+ fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]string:
+ fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
+ case *map[float32]string:
+ fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int:
+ fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
+ case *map[float32]int:
+ fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+ case *[]float64:
+ fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]string:
+ fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
+ case *map[float64]string:
+ fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int:
+ fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
+ case *map[float64]int:
+ fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint:
+ fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+ case *[]uint:
+ fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]string:
+ fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
+ case *map[uint]string:
+ fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint:
+ fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint:
+ fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int:
+ fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
+ case *map[uint]int:
+ fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int8:
+ fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int8:
+ fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int16:
+ fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int16:
+ fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int32:
+ fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int32:
+ fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int64:
+ fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int64:
+ fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]bool:
+ fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint]bool:
+ fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]string:
+ fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int:
+ fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint16:
+ fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+ case *[]uint16:
+ fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]string:
+ fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]string:
+ fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int:
+ fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int:
+ fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint32:
+ fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+ case *[]uint32:
+ fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]string:
+ fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]string:
+ fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int:
+ fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int:
+ fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+ case *[]uint64:
+ fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]string:
+ fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int:
+ fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uintptr:
+ fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+ case *[]uintptr:
+ fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int:
+ fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+ case *[]int:
+ fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e)
+ case *map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e)
+ case *map[int]string:
+ fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint:
+ fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e)
+ case *map[int]uint:
+ fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint8:
+ fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint16:
+ fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint16:
+ fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint32:
+ fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint32:
+ fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint64:
+ fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e)
+ case *map[int]int:
+ fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int8:
+ fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e)
+ case *map[int]int8:
+ fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int16:
+ fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e)
+ case *map[int]int16:
+ fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e)
+ case *map[int]int32:
+ fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int64:
+ fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e)
+ case *map[int]int64:
+ fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float32:
+ fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[int]float32:
+ fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[int]float64:
+ fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e)
+ case *map[int]bool:
+ fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int8:
+ fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+ case *[]int8:
+ fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]string:
+ fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e)
+ case *map[int8]string:
+ fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint:
+ fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint:
+ fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int:
+ fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e)
+ case *map[int8]int:
+ fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int16:
+ fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+ case *[]int16:
+ fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]string:
+ fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e)
+ case *map[int16]string:
+ fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint:
+ fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint:
+ fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int:
+ fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e)
+ case *map[int16]int:
+ fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+ case *[]int32:
+ fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e)
+ case *map[int32]string:
+ fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint:
+ fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint:
+ fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e)
+ case *map[int32]int:
+ fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+ case *[]int64:
+ fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]string:
+ fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e)
+ case *map[int64]string:
+ fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint:
+ fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint:
+ fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int:
+ fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e)
+ case *map[int64]int:
+ fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+ case *[]bool:
+ fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
+ case *map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]string:
+ fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
+ case *map[bool]string:
+ fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint:
+ fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint:
+ fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int:
+ fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
+ case *map[bool]int:
+ fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
+ case *map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
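+
+// The type switch ending above, and the slice- and map-only switches below,
+// are machine-generated: each pair of cases asserts one concrete slice or map
+// type (plus its pointer form) so encoding proceeds without reflection, and a
+// false return sends the value back to the general reflect-based path.
+// A minimal caller sketch, assuming the package's public NewEncoder/Handle
+// API (the concrete map type here is illustrative only):
+//
+//	var buf bytes.Buffer
+//	enc := codec.NewEncoder(&buf, &codec.JsonHandle{})
+//	if err := enc.Encode(map[string]int{"a": 1}); err != nil { // concrete type takes a fast path
+//		panic(err)
+//	}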
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+ case *[]interface{}:
+ fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+ case []string:
+ fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+ case *[]string:
+ fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+ case *[]float32:
+ fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+ case *[]float64:
+ fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case []uint:
+ fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+ case *[]uint:
+ fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+ case []uint16:
+ fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+ case *[]uint16:
+ fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+ case []uint32:
+ fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+ case *[]uint32:
+ fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+ case *[]uint64:
+ fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+ case []uintptr:
+ fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+ case *[]uintptr:
+ fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case []int:
+ fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+ case *[]int:
+ fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+ case []int8:
+ fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+ case *[]int8:
+ fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+ case []int16:
+ fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+ case *[]int16:
+ fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+ case *[]int32:
+ fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+ case *[]int64:
+ fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+ case *[]bool:
+ fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
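+
+// fastpathEncodeTypeSwitchSlice restricts the dispatch above to slice types
+// and their pointers; anything else falls through to the default case and
+// returns false, telling the caller to fall back to reflection.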
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+
+ case map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
+ case *map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
+ case *map[string]string:
+ fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint:
+ fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
+ case *map[string]uint:
+ fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint8:
+ fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint16:
+ fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint16:
+ fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint32:
+ fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint32:
+ fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint64:
+ fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
+ case *map[string]int:
+ fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int8:
+ fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
+ case *map[string]int8:
+ fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int16:
+ fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
+ case *map[string]int16:
+ fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
+ case *map[string]int32:
+ fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int64:
+ fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
+ case *map[string]int64:
+ fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float32:
+ fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[string]float32:
+ fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[string]float64:
+ fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
+ case *map[string]bool:
+ fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]string:
+ fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
+ case *map[float32]string:
+ fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int:
+ fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
+ case *map[float32]int:
+ fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]string:
+ fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
+ case *map[float64]string:
+ fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int:
+ fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
+ case *map[float64]int:
+ fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]string:
+ fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
+ case *map[uint]string:
+ fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint:
+ fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint:
+ fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int:
+ fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
+ case *map[uint]int:
+ fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int8:
+ fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int8:
+ fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int16:
+ fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int16:
+ fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int32:
+ fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int32:
+ fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int64:
+ fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int64:
+ fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]bool:
+ fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint]bool:
+ fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]string:
+ fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int:
+ fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]string:
+ fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]string:
+ fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int:
+ fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int:
+ fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]string:
+ fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]string:
+ fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int:
+ fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int:
+ fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]string:
+ fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int:
+ fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e)
+ case *map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e)
+ case *map[int]string:
+ fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint:
+ fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e)
+ case *map[int]uint:
+ fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint8:
+ fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint16:
+ fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint16:
+ fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint32:
+ fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint32:
+ fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint64:
+ fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e)
+ case *map[int]int:
+ fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int8:
+ fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e)
+ case *map[int]int8:
+ fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int16:
+ fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e)
+ case *map[int]int16:
+ fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e)
+ case *map[int]int32:
+ fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int64:
+ fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e)
+ case *map[int]int64:
+ fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float32:
+ fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[int]float32:
+ fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[int]float64:
+ fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e)
+ case *map[int]bool:
+ fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]string:
+ fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e)
+ case *map[int8]string:
+ fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint:
+ fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint:
+ fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int:
+ fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e)
+ case *map[int8]int:
+ fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]string:
+ fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e)
+ case *map[int16]string:
+ fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint:
+ fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint:
+ fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int:
+ fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e)
+ case *map[int16]int:
+ fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e)
+ case *map[int32]string:
+ fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint:
+ fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint:
+ fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e)
+ case *map[int32]int:
+ fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]string:
+ fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e)
+ case *map[int64]string:
+ fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint:
+ fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint:
+ fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int:
+ fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e)
+ case *map[int64]int:
+ fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
+ case *map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]string:
+ fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
+ case *map[bool]string:
+ fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint:
+ fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint:
+ fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int:
+ fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
+ case *map[bool]int:
+ fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
+ case *map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
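+
+// fastpathEncodeTypeSwitchMap is the map-only counterpart: it dispatches every
+// supported map[K]V combination (and the pointer forms) and likewise returns
+// false for any other type so the reflect-based encoder takes over.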
+
+// -- -- fast path functions
+
+func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
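+
+// EncSliceIntfV is the template every EncSlice*V function below follows:
+// optionally encode nil, emit EncodeArrayStart with the element count, then
+// encode each element in turn. When cr is non-nil it receives container-state
+// events (containerArrayElem, containerArrayEnd) that formats with explicit
+// separators, such as JSON, use to place commas and closing brackets.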
+
+func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
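+
+// The EncAsMapSlice*V variants implement the MapBySlice convention: a slice of
+// alternating key/value pairs is streamed as a map of len(v)/2 entries, and an
+// odd length is rejected via e.errorf. A sketch of a type that opts in,
+// assuming the package's MapBySlice marker interface (type name hypothetical):
+//
+//	type pairs []interface{}
+//
+//	func (pairs) MapBySlice() {}
+//
+//	// enc.Encode(pairs{"k", 1}) emits {"k": 1} rather than ["k", 1].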
+
+func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
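+
+// From here on the Enc*V/EncAsMap*V pairs repeat the same generated shape;
+// only the element type and the encDriver call change (EncodeString with
+// c_UTF8 above, EncodeFloat32, EncodeFloat64 and EncodeUint below).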
+
+func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeFloat32(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeFloat64(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeBool(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeBool(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) {
+ fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
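For interface{}-keyed maps there is no natural key ordering, so in Canonical mode each function above first encodes every key into a scratch buffer (mksv), sorts the keys by their encoded byte form (bytesISlice), and only then emits the sorted key/value pairs; non-canonical mode simply ranges over the map. A short usage sketch: with Canonical set on the handle, repeated encodings of the same map produce identical bytes.

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        h := new(codec.CborHandle)
        h.Canonical = true // sort map keys for deterministic output
        m := map[interface{}]interface{}{1: "a", 2: "b", 3: "c"}
        var out1, out2 []byte
        _ = codec.NewEncoderBytes(&out1, h).Encode(m)
        _ = codec.NewEncoderBytes(&out2, h).Encode(m)
        fmt.Println(string(out1) == string(out2)) // true
    }
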
+func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) {
+ fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) {
+ fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) {
+ fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) {
+ fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) {
+ fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
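String-keyed maps take a simpler canonical path: the keys are copied into a []string, sorted with sort.Sort(stringSlice(...)), and looked back up in the map. The asSymbols flag additionally lets handles with a symbol table (notably Binc) intern repeated map keys as references rather than re-emitting the full string each time. A usage sketch under that reading of the AsSymbols flag seen above:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        h := new(codec.BincHandle)
        // Encode string map keys as symbols so repeated keys are
        // written once and referenced thereafter.
        h.AsSymbols = codec.AsSymbolMapStringKeysFlag
        var out []byte
        enc := codec.NewEncoderBytes(&out, h)
        err := enc.Encode([]map[string]int{{"alpha": 1}, {"alpha": 2}})
        fmt.Println(err, len(out))
    }
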
+func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) {
+ fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) {
+ fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) {
+ fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) {
+ fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
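Float-keyed maps reuse the same canonical strategy with one twist visible above: float32 keys are widened to float64 so a single floatSlice sort covers both float widths, and each sorted key is then narrowed back to float32 to index the original map. A standalone sketch of that ordering step (names here are illustrative, using the stdlib sort in place of the package's floatSlice):

    package main

    import (
        "fmt"
        "sort"
    )

    func sortedFloat32Keys(m map[float32]int) []float32 {
        // Widen to float64 for sorting, as the generated code does,
        // then narrow back to float32 to index the map.
        ks := make([]float64, 0, len(m))
        for k := range m {
            ks = append(ks, float64(k))
        }
        sort.Float64s(ks)
        out := make([]float32, len(ks))
        for i, k := range ks {
            out[i] = float32(k)
        }
        return out
    }

    func main() {
        m := map[float32]int{3.5: 3, 1.5: 1, 2.5: 2}
        for _, k := range sortedFloat32Keys(m) {
            fmt.Println(k, m[k]) // keys visited in ascending order
        }
    }
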
+func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
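+// uintptr values (like interface{} values) have no dedicated ee.EncodeXxx
+// method in this template, so they are handed to e.encode, the generic
+// encode path, rather than a typed call on ee.
+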
+func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
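+// map[float64]V encoders: the same template; float64 keys need no
+// widening, but the canonical sort still goes through floatSlice.
+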
+func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
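+// map[uint]V encoders: for canonical output the keys are widened into a
+// []uint64, sorted with uintSlice, then converted back for the lookup.
+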
+func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) {
+ fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) {
+ fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) {
+ fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) {
+ fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) {
+ fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
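+// map[uint8]V encoders: same shape, with the sorted uint64 key narrowed
+// back to uint8 for both the EncodeUint call and the map lookup.
+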
+func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
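+// When e.h.Canonical is set, each encoder snapshots the keys into a
+// []uint64, sorts that slice via the package's uintSlice adapter, and
+// converts each sorted value back to the key type to fetch its entry.
+// Go randomizes map iteration order, so this sort is what makes the
+// encoded stream byte-for-byte deterministic. A minimal standalone
+// sketch of the same idea, with sort.Slice standing in for uintSlice:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"sort"
+//	)
+//
+//	func main() {
+//		m := map[uint8]int{3: 30, 1: 10, 2: 20}
+//		keys := make([]uint64, 0, len(m))
+//		for k := range m {
+//			keys = append(keys, uint64(k))
+//		}
+//		sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+//		for _, k := range keys {
+//			fmt.Println(k, m[uint8(k)]) // always 1 10, 2 20, 3 30
+//		}
+//	}
+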
+func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
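+// The cr != nil guards are how these encoders cooperate with formats
+// that need explicit structural delimiters. A driver may implement the
+// containerStateRecv hook (stored in e.cr); JSON, for example, turns the
+// containerMapKey / containerMapValue / containerMapEnd events into the
+// ':' and ',' separators and the closing brace. Binary drivers leave cr
+// nil, so every notification below degenerates to a skipped branch.
+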
+func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
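+// String values go through ee.EncodeString(c_UTF8, ...): the c_UTF8 tag
+// marks the payload as text rather than raw bytes, a distinction that
+// formats such as msgpack and cbor preserve on the wire.
+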
+func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
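+// For uintptr values (as in EncMapUint16UintptrV above) there is no
+// dedicated encDriver method, so the generator falls back to the generic
+// e.encode(...) path instead of a direct ee.EncodeUint call.
+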
+func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
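+// The template now repeats for uint64 keys. Across all the unsigned key
+// widths the canonical branch buffers keys in a []uint64, the widest of
+// the types involved, so the uint64(k) copy in and the uintN(k2)
+// conversion back out are both lossless.
+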
+func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
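+// In these uint64-keyed encoders the key write expands to
+// ee.EncodeUint(uint64(uint64(k2))): the template slot that produces
+// uint64(uint8(k2)) and friends for narrower keys degenerates here into
+// a redundant but harmless self-conversion.
+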
+func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
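+// With uintptr keys the generic e.encode path takes over on the key side
+// as well: e.encode(uintptr(k2)) in the sorted branch, e.encode(k2)
+// otherwise. The canonical ordering machinery itself is unchanged, since
+// a uintptr key still fits the shared []uint64 sort buffer.
+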
+func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) {
+ fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) {
+ fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) {
+ fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) {
+ fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) {
+ fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
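+// Note: uintptr values, like interface{} values, are routed through the
+// generic e.encode path rather than a typed ee.Encode* call in the
+// generated code below.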
+func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
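+// From here down the map key type is bool. Canonical ordering sorts keys
+// via boolSlice (presumably false before true), mirroring the intSlice
+// sort used for the integer-keyed encoders above.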
+func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) {
+ fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) {
+ fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) {
+ fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) {
+ fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) {
+ fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+// -- decode
+
+// -- -- fast path type switch
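+// fastpathDecodeTypeSwitch dispatches on the concrete type of iv. Each
+// supported container gets two cases: a value case, which decodes into the
+// container in place, and a pointer case, which decodes through the pointer
+// and writes the result back only when the worker reports a change (for
+// example, a slice that was reallocated to grow, or a map that was nil and
+// had to be allocated).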
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d)
+ case *[]interface{}:
+ v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]interface{}:
+ fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]interface{}:
+ v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]string:
+ fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]string:
+ v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint:
+ fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint:
+ v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint8:
+ fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint8:
+ v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint16:
+ fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint16:
+ v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint32:
+ fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint32:
+ v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint64:
+ fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint64:
+ v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uintptr:
+ fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uintptr:
+ v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int:
+ fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int:
+ v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int8:
+ fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int8:
+ v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int16:
+ fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int16:
+ v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int32:
+ fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int32:
+ v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int64:
+ fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int64:
+ v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]float32:
+ fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]float32:
+ v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]float64:
+ fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]float64:
+ v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]bool:
+ fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]bool:
+ v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []string:
+ fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d)
+ case *[]string:
+ v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]interface{}:
+ fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]interface{}:
+ v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]string:
+ fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]string:
+ v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint:
+ fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint:
+ v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint8:
+ fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint8:
+ v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint16:
+ fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint16:
+ v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint32:
+ fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint32:
+ v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint64:
+ fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint64:
+ v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uintptr:
+ fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uintptr:
+ v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int:
+ fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int:
+ v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int8:
+ fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int8:
+ v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int16:
+ fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int16:
+ v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int32:
+ fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int32:
+ v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int64:
+ fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int64:
+ v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]float32:
+ fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]float32:
+ v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]float64:
+ fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]float64:
+ v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]bool:
+ fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]bool:
+ v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []float32:
+ fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *[]float32:
+ v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]interface{}:
+ fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]interface{}:
+ v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]string:
+ fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]string:
+ v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint:
+ fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint:
+ v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint8:
+ fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint8:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint16:
+ fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint16:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint32:
+ fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint32:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint64:
+ fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint64:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uintptr:
+ fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uintptr:
+ v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int:
+ fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int:
+ v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int8:
+ fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int8:
+ v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int16:
+ fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int16:
+ v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int32:
+ fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int32:
+ v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int64:
+ fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int64:
+ v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]float32:
+ fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]float32:
+ v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]float64:
+ fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]float64:
+ v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]bool:
+ fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]bool:
+ v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []float64:
+ fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *[]float64:
+ v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]interface{}:
+ fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]interface{}:
+ v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]string:
+ fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]string:
+ v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint:
+ fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint:
+ v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint8:
+ fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint8:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint16:
+ fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint16:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint32:
+ fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint32:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint64:
+ fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint64:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uintptr:
+ fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uintptr:
+ v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int:
+ fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int:
+ v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int8:
+ fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int8:
+ v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int16:
+ fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int16:
+ v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int32:
+ fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int32:
+ v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int64:
+ fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int64:
+ v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]float32:
+ fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]float32:
+ v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]float64:
+ fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]float64:
+ v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]bool:
+ fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]bool:
+ v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint:
+ fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d)
+ case *[]uint:
+ v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]interface{}:
+ fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]interface{}:
+ v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]string:
+ fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]string:
+ v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint:
+ fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint:
+ v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint8:
+ fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint8:
+ v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint16:
+ fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint16:
+ v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint32:
+ fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint32:
+ v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint64:
+ fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint64:
+ v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uintptr:
+ fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uintptr:
+ v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int:
+ fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int:
+ v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int8:
+ fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int8:
+ v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int16:
+ fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int16:
+ v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int32:
+ fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int32:
+ v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int64:
+ fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int64:
+ v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]float32:
+ fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]float32:
+ v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]float64:
+ fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]float64:
+ v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]bool:
+ fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]bool:
+ v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]interface{}:
+ fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]string:
+ fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]string:
+ v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint:
+ fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint:
+ v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint8:
+ fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint8:
+ v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint16:
+ fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint16:
+ v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint32:
+ fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint32:
+ v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint64:
+ fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint64:
+ v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uintptr:
+ fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int:
+ fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int:
+ v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int8:
+ fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int8:
+ v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int16:
+ fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int16:
+ v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int32:
+ fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int32:
+ v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int64:
+ fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int64:
+ v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]float32:
+ fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]float32:
+ v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]float64:
+ fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]float64:
+ v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]bool:
+ fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]bool:
+ v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint16:
+ fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint16:
+ v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]interface{}:
+ fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]string:
+ fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]string:
+ v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint:
+ fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint:
+ v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint8:
+ fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint8:
+ v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint16:
+ fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint16:
+ v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint32:
+ fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint32:
+ v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint64:
+ fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint64:
+ v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uintptr:
+ fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int:
+ fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int:
+ v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int8:
+ fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int8:
+ v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int16:
+ fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int16:
+ v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int32:
+ fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int32:
+ v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int64:
+ fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int64:
+ v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]float32:
+ fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]float32:
+ v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]float64:
+ fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]float64:
+ v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]bool:
+ fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]bool:
+ v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint32:
+ fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint32:
+ v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]interface{}:
+ fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]string:
+ fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]string:
+ v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint:
+ fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint:
+ v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint8:
+ fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint8:
+ v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint16:
+ fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint16:
+ v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint32:
+ fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint32:
+ v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint64:
+ fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint64:
+ v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uintptr:
+ fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int:
+ fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int:
+ v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int8:
+ fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int8:
+ v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int16:
+ fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int16:
+ v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int32:
+ fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int32:
+ v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int64:
+ fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int64:
+ v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]float32:
+ fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]float32:
+ v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]float64:
+ fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]float64:
+ v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]bool:
+ fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]bool:
+ v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint64:
+ fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint64:
+ v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]interface{}:
+ fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]string:
+ fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]string:
+ v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint:
+ fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint:
+ v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint8:
+ fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint8:
+ v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint16:
+ fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint16:
+ v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint32:
+ fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint32:
+ v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint64:
+ fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint64:
+ v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uintptr:
+ fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int:
+ fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int:
+ v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int8:
+ fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int8:
+ v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int16:
+ fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int16:
+ v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int32:
+ fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int32:
+ v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int64:
+ fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int64:
+ v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]float32:
+ fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]float32:
+ v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]float64:
+ fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]float64:
+ v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]bool:
+ fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]bool:
+ v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uintptr:
+ fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *[]uintptr:
+ v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]interface{}:
+ fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]interface{}:
+ v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]string:
+ fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]string:
+ v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint:
+ fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint:
+ v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint8:
+ fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint8:
+ v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint16:
+ fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint16:
+ v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint32:
+ fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint32:
+ v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint64:
+ fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint64:
+ v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uintptr:
+ fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uintptr:
+ v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int:
+ fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int:
+ v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int8:
+ fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int8:
+ v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int16:
+ fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int16:
+ v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int32:
+ fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int32:
+ v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int64:
+ fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int64:
+ v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]float32:
+ fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]float32:
+ v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]float64:
+ fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]float64:
+ v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]bool:
+ fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]bool:
+ v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int:
+ fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d)
+ case *[]int:
+ v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]interface{}:
+ fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]interface{}:
+ v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]string:
+ fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]string:
+ v2, changed2 := fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint:
+ fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint:
+ v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint8:
+ fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint8:
+ v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint16:
+ fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint16:
+ v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint32:
+ fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint32:
+ v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint64:
+ fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint64:
+ v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uintptr:
+ fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uintptr:
+ v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int:
+ fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int:
+ v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int8:
+ fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int8:
+ v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int16:
+ fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int16:
+ v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int32:
+ fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int32:
+ v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int64:
+ fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int64:
+ v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]float32:
+ fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]float32:
+ v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]float64:
+ fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]float64:
+ v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]bool:
+ fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]bool:
+ v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int8:
+ fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d)
+ case *[]int8:
+ v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]interface{}:
+ fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]string:
+ fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]string:
+ v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint:
+ fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint:
+ v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint8:
+ fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint8:
+ v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint16:
+ fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint16:
+ v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint32:
+ fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint32:
+ v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint64:
+ fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint64:
+ v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uintptr:
+ fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int:
+ fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int:
+ v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int8:
+ fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int8:
+ v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int16:
+ fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int16:
+ v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int32:
+ fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int32:
+ v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int64:
+ fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int64:
+ v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]float32:
+ fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]float32:
+ v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]float64:
+ fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]float64:
+ v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]bool:
+ fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]bool:
+ v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int16:
+ fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d)
+ case *[]int16:
+ v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]interface{}:
+ fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]string:
+ fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]string:
+ v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint:
+ fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint:
+ v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint8:
+ fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint8:
+ v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint16:
+ fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint16:
+ v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint32:
+ fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint32:
+ v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint64:
+ fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint64:
+ v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uintptr:
+ fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int:
+ fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int:
+ v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int8:
+ fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int8:
+ v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int16:
+ fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int16:
+ v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int32:
+ fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int32:
+ v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int64:
+ fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int64:
+ v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]float32:
+ fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]float32:
+ v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]float64:
+ fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]float64:
+ v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]bool:
+ fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]bool:
+ v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int32:
+ fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d)
+ case *[]int32:
+ v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]interface{}:
+ fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]string:
+ fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]string:
+ v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint:
+ fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint:
+ v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint8:
+ fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint8:
+ v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint16:
+ fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint16:
+ v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint32:
+ fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint32:
+ v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint64:
+ fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint64:
+ v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uintptr:
+ fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int:
+ fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int:
+ v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int8:
+ fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int8:
+ v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int16:
+ fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int16:
+ v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int32:
+ fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int32:
+ v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int64:
+ fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int64:
+ v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]float32:
+ fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]float32:
+ v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]float64:
+ fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]float64:
+ v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]bool:
+ fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]bool:
+ v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int64:
+ fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d)
+ case *[]int64:
+ v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]interface{}:
+ fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]string:
+ fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]string:
+ v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint:
+ fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint:
+ v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint8:
+ fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint8:
+ v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint16:
+ fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint16:
+ v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint32:
+ fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint32:
+ v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint64:
+ fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint64:
+ v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uintptr:
+ fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int:
+ fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int:
+ v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int8:
+ fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int8:
+ v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int16:
+ fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int16:
+ v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int32:
+ fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int32:
+ v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int64:
+ fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int64:
+ v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]float32:
+ fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]float32:
+ v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]float64:
+ fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]float64:
+ v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]bool:
+ fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]bool:
+ v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []bool:
+ fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d)
+ case *[]bool:
+ v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]interface{}:
+ fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]interface{}:
+ v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]string:
+ fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]string:
+ v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint:
+ fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint:
+ v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint8:
+ fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint8:
+ v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint16:
+ fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint16:
+ v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint32:
+ fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint32:
+ v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint64:
+ fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint64:
+ v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uintptr:
+ fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uintptr:
+ v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int:
+ fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int:
+ v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int8:
+ fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int8:
+ v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int16:
+ fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int16:
+ v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int32:
+ fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int32:
+ v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int64:
+ fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int64:
+ v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]float32:
+ fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]float32:
+ v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]float64:
+ fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]float64:
+ v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]bool:
+ fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]bool:
+ v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+
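+// Note on the generated slice fast-paths below: every DecSliceXxxV helper
+// follows the same template. If checkNil is set, a nil stream value resets
+// the slice to nil. Otherwise the container length is read: for a known
+// length the slice is sized up front (capped via decInferLen against
+// d.h.MaxInitLen, with xtrunc marking a truncated allocation) and decoded
+// in place, while an indefinite-length stream grows the slice element by
+// element until dd.CheckBreak signals the end marker. The changed result
+// reports whether the slice header was replaced, so the X and R wrappers
+// know when to write it back through the pointer; when canChange is false,
+// elements that do not fit are skipped with d.swallow().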
+func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]interface{})
+ v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]interface{})
+ fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ d.decode(&v[j])
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, nil)
+ slh.ElemContainerState(j)
+ d.decode(&v[j])
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]interface{}, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, nil)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ d.decode(&v[j])
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]string)
+ v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]string)
+ fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]string, xlen)
+ }
+ } else {
+ v = make([]string, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeString()
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, "")
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeString()
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]string, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, "")
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeString()
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]float32)
+ v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]float32)
+ fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float32, xlen)
+ }
+ } else {
+ v = make([]float32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = float32(dd.DecodeFloat(true))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = float32(dd.DecodeFloat(true))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]float32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = float32(dd.DecodeFloat(true))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]float64)
+ v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]float64)
+ fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float64, xlen)
+ }
+ } else {
+ v = make([]float64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeFloat(false)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeFloat(false)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]float64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeFloat(false)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint)
+ v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint)
+ fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint, xlen)
+ }
+ } else {
+ v = make([]uint, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint16)
+ v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint16)
+ fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint16, xlen)
+ }
+ } else {
+ v = make([]uint16, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint16(dd.DecodeUint(16))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint16(dd.DecodeUint(16))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint16, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint16(dd.DecodeUint(16))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint32)
+ v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint32)
+ fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint32, xlen)
+ }
+ } else {
+ v = make([]uint32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint32(dd.DecodeUint(32))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint32(dd.DecodeUint(32))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint32(dd.DecodeUint(32))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint64)
+ v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint64)
+ fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint64, xlen)
+ }
+ } else {
+ v = make([]uint64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeUint(64)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeUint(64)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeUint(64)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uintptr)
+ v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uintptr)
+ fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uintptr, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int)
+ v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int)
+ fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int, xlen)
+ }
+ } else {
+ v = make([]int, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int(dd.DecodeInt(intBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int(dd.DecodeInt(intBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int(dd.DecodeInt(intBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int8)
+ v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int8)
+ fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int8, xlen)
+ }
+ } else {
+ v = make([]int8, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int8(dd.DecodeInt(8))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int8(dd.DecodeInt(8))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int8, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int8(dd.DecodeInt(8))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int16)
+ v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int16)
+ fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int16, xlen)
+ }
+ } else {
+ v = make([]int16, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int16(dd.DecodeInt(16))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int16(dd.DecodeInt(16))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int16, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int16(dd.DecodeInt(16))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int32)
+ v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int32)
+ fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int32, xlen)
+ }
+ } else {
+ v = make([]int32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int32(dd.DecodeInt(32))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int32(dd.DecodeInt(32))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int32(dd.DecodeInt(32))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int64)
+ v, changed := fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int64)
+ fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int64, xlen)
+ }
+ } else {
+ v = make([]int64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeInt(64)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeInt(64)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeInt(64)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]bool)
+ v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]bool)
+ fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]bool, xlen)
+ }
+ } else {
+ v = make([]bool, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeBool()
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, false)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeBool()
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]bool, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, false)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeBool()
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
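+// Note on the generated map fast-paths below: each DecMapXxxYyyV helper
+// mirrors the slice template for maps. If checkNil is set, a nil stream
+// value resets the map to nil. Otherwise, when canChange permits, a nil map
+// is allocated with a size hint from decInferLen, and key/value pairs are
+// decoded in a counted loop when dd.ReadMapStart returns a length, or in a
+// dd.CheckBreak loop for indefinite-length containers. Keys decoded as
+// []byte are converted through d.string, and container-state transitions
+// are reported through d.cr when one is attached.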
+func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]interface{})
+ v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]interface{})
+ fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]interface{}, xlen)
+ changed = true
+ }
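+	// Reuse the existing value for each key so interface-typed values can be
+	// decoded in place, unless the handle requests fresh values
+	// (MapValueReset) or fresh interfaces (InterfaceReset).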
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk interface{}
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]string)
+ v, changed := fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]string)
+ fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]string, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint)
+ v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint)
+ fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint8)
+ v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint8)
+ fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]uint8, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint16)
+ v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint16)
+ fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]uint16, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint32)
+ v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint32)
+ fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]uint32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint64)
+ v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint64)
+ fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uintptr)
+ v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uintptr)
+ fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uintptr, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int)
+ v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int)
+ fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int8)
+ v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int8)
+ fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]int8, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int16)
+ v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int16)
+ fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]int16, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int32)
+ v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int32)
+ fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]int32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int64)
+ v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int64)
+ fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]float32)
+ v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]float32)
+ fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]float32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]float64)
+ v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]float64)
+ fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]float64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]bool)
+ v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]bool)
+ fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]bool, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]interface{})
+ v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]interface{})
+ fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk string
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]string)
+ v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]string)
+ fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]string, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint)
+ v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint)
+ fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint8)
+ v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint8)
+ fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]uint8, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint16)
+ v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint16)
+ fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]uint16, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint32)
+ v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint32)
+ fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]uint32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint64)
+ v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint64)
+ fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uintptr)
+ v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uintptr)
+ fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uintptr, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int)
+ v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int)
+ fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int8)
+ v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int8)
+ fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]int8, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int16)
+ v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int16)
+ fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]int16, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int32)
+ v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int32)
+ fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]int32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int64)
+ v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int64)
+ fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]float32)
+ v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]float32)
+ fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]float32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]float64)
+ v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]float64)
+ fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]float64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]bool)
+ v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]bool)
+ fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]bool, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]interface{})
+ v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]interface{})
+ fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]string)
+ v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]string)
+ fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]string, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint)
+ v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint)
+ fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint8)
+ v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint8)
+ fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]uint8, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint16)
+ v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint16)
+ fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]uint16, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint32)
+ v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint32)
+ fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]uint32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint64)
+ v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint64)
+ fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uintptr)
+ v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uintptr)
+ fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int)
+ v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int)
+ fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int8)
+ v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int8)
+ fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]int8, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int16)
+ v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int16)
+ fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]int16, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
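+// Two loop shapes recur in every *V method: a counted loop when
+// ReadMapStart reported a known length (containerLen > 0), and a
+// CheckBreak-terminated loop for indefinite-length encodings
+// (containerLen < 0). When non-nil, the cr hook is notified before each
+// key, before each value, and at map end, for formats that need those
+// container-state transitions (e.g. to consume separators).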
+func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int32)
+ v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int32)
+ fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]int32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int64)
+ v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int64)
+ fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
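+// The boolean passed to DecodeFloat appears to select float32 handling:
+// true asks the codec to range-check the value as a float32, false decodes
+// a full float64. float32 keys and values therefore always pass true, and
+// the float64 variants below pass false.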
+func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]float32)
+ v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]float32)
+ fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]float32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]float64)
+ v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]float64)
+ fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]float64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]bool)
+ v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]bool)
+ fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]bool, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
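+// From here the map key type switches to float64. For interface{}-valued
+// maps, mapGet is true only when neither MapValueReset nor InterfaceReset
+// is set on the handle; in that case the existing value for a key is
+// fetched first so it can be reused as the decode target instead of
+// allocating a fresh one.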
+func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]interface{})
+ v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]interface{})
+ fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]string)
+ v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]string)
+ fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]string, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint)
+ v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint)
+ fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint8)
+ v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint8)
+ fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]uint8, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint16)
+ v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint16)
+ fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]uint16, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint32)
+ v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint32)
+ fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]uint32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint64)
+ v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint64)
+ fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uintptr)
+ v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uintptr)
+ fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int)
+ v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int)
+ fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int8)
+ v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int8)
+ fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]int8, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int16)
+ v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int16)
+ fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]int16, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int32)
+ v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int32)
+ fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]int32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int64)
+ v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int64)
+ fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]float32)
+ v, changed := fastpathTV.DecMapFloat64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]float32)
+ fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]float32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]float64)
+ v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]float64)
+ fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]float64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]bool)
+ v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]bool)
+ fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]bool, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
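+// The decoders that follow switch to uint map keys, read via
+// DecodeUint(uintBitsize); otherwise their structure is identical to the
+// float-keyed variants above.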
+func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]interface{})
+ v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]interface{})
+ fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]string)
+ v, changed := fastpathTV.DecMapUintStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]string)
+ fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]string, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint)
+ v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint)
+ fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint8)
+ v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint8)
+ fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint16)
+ v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint16)
+ fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint32)
+ v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint32)
+ fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint64)
+ v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint64)
+ fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uintptr)
+ v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uintptr)
+ fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int)
+ v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int)
+ fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int8)
+ v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int8)
+ fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]int8, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int16)
+ v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int16)
+ fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]int16, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int32)
+ v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int32)
+ fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]int32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int64)
+ v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int64)
+ fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
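+// Float-valued variants differ only in the DecodeFloat argument: the
+// float32 path passes true (requesting an overflow check against the
+// 32-bit float range) before converting, while float64 passes false.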
+func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]float32)
+ v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]float32)
+ fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]float32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]float64)
+ v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]float64)
+ fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]float64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]bool)
+ v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]bool)
+ fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]bool, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
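+// Variants with interface{} values also consult the handle options:
+// when neither MapValueReset nor InterfaceReset is set (mapGet), the
+// current value for a key is looked up and decoded into, letting
+// nested containers be updated in place rather than replaced.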
+func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]interface{})
+ v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]interface{})
+ fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint8
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]string)
+ v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]string)
+ fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]string, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint)
+ v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint)
+ fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint8)
+ v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint8)
+ fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint16)
+ v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint16)
+ fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint32)
+ v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint32)
+ fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint64)
+ v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint64)
+ fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uintptr)
+ v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uintptr)
+ fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int)
+ v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int)
+ fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int8)
+ v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int8)
+ fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]int8, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int16)
+ v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int16)
+ fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]int16, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int32)
+ v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int32)
+ fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]int32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int64)
+ v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int64)
+ fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]float32)
+ v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]float32)
+ fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]float32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]float64)
+ v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]float64)
+ fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]float64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]bool)
+ v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]bool)
+ fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]bool, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]interface{})
+ v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]interface{})
+ fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint16
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]string)
+ v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]string)
+ fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]string, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint)
+ v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint)
+ fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint8)
+ v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint8)
+ fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint16)
+ v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint16)
+ fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint32)
+ v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint32)
+ fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint64)
+ v, changed := fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint64)
+ fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uintptr)
+ v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uintptr)
+ fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int)
+ v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int)
+ fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int8)
+ v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int8)
+ fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]int8, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int16)
+ v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int16)
+ fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]int16, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int32)
+ v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int32)
+ fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]int32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int64)
+ v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int64)
+ fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]float32)
+ v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]float32)
+ fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]float32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]float64)
+ v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]float64)
+ fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]float64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]bool)
+ v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]bool)
+ fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]bool, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]interface{})
+ v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]interface{})
+ fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
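+// Editorial note, not part of the generated source: for maps with
+// interface{} values, as in DecMapUint32IntfV above, the template adds a
+// mapGet check. Unless the handle sets MapValueReset or InterfaceReset, the
+// existing entry v[mk] is looked up first so d.decode(&mv) can decode into
+// the value already stored there instead of allocating a fresh one.
+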
+func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]string)
+ v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]string)
+ fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]string, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint)
+ v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint)
+ fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint8)
+ v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint8)
+ fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint16)
+ v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint16)
+ fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint32)
+ v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint32)
+ fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint64)
+ v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint64)
+ fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uintptr)
+ v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uintptr)
+ fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int)
+ v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int)
+ fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int8)
+ v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int8)
+ fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]int8, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int16)
+ v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int16)
+ fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]int16, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int32)
+ v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int32)
+ fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]int32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int64)
+ v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int64)
+ fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]float32)
+ v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]float32)
+ fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]float32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]float64)
+ v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]float64)
+ fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]float64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]bool)
+ v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]bool)
+ fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]bool, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
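+// Editorial note, not part of the generated source: DecodeUint and DecodeInt
+// take the target's bit size so the codec can range-check the wire value
+// before the generated code narrows it with a plain Go conversion; for the
+// 64-bit keys and values below, no conversion is needed.
+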
+func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]interface{})
+ v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]interface{})
+ fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]string)
+ v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]string)
+ fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]string, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint)
+ v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint)
+ fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint8)
+ v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint8)
+ fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint16)
+ v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint16)
+ fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint32)
+ v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint32)
+ fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint64)
+ v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint64)
+ fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uintptr)
+ v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uintptr)
+ fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int)
+ v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int)
+ fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int8)
+ v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int8)
+ fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]int8, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int16)
+ v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int16)
+ fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]int16, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int32)
+ v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int32)
+ fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]int32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int64)
+ v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int64)
+ fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]float32)
+ v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]float32)
+ fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]float32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv float32
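+ // DecodeFloat(true) requests a float32 overflow check on the decoded
+ // value before the narrowing conversion; the float64 paths pass false.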
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]float64)
+ v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]float64)
+ fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]float64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]bool)
+ v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]bool)
+ fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]bool, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]interface{})
+ v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]interface{})
+ fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]interface{}, xlen)
+ changed = true
+ }
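+ // For interface{} values: unless MapValueReset or InterfaceReset is set,
+ // the existing entry is fetched and handed to d.decode so it is decoded
+ // into (updated in place) rather than replaced.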
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uintptr
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]string)
+ v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]string)
+ fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]string, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint)
+ v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint)
+ fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint8)
+ v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint8)
+ fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]uint8, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint16)
+ v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint16)
+ fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]uint16, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint32)
+ v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint32)
+ fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]uint32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint64)
+ v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint64)
+ fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uintptr)
+ v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uintptr)
+ fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int)
+ v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int)
+ fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int8)
+ v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int8)
+ fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]int8, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int16)
+ v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int16)
+ fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]int16, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int32)
+ v, changed := fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int32)
+ fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]int32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int64)
+ v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int64)
+ fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]float32)
+ v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]float32)
+ fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]float32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]float64)
+ v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]float64)
+ fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]float64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]bool)
+ v, changed := fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]bool)
+ fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]bool, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
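+// The int-keyed fastpaths mirror the uint64/uintptr ones above, except keys
+// are read with dd.DecodeInt(intBitsize) and converted to int.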
+func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]interface{})
+ v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]interface{})
+ fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]string)
+ v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]string)
+ fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]string, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint)
+ v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint)
+ fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint8)
+ v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint8)
+ fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]uint8, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint16)
+ v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint16)
+ fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]uint16, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint32)
+ v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint32)
+ fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]uint32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint64)
+ v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint64)
+ fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uintptr)
+ v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uintptr)
+ fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int)
+ v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int)
+ fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int8)
+ v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int8)
+ fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]int8, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int16)
+ v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int16)
+ fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]int16, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int32)
+ v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int32)
+ fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]int32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int64)
+ v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int64)
+ fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]float32)
+ v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]float32)
+ fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]float32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]float64)
+ v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]float64)
+ fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]float64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]bool)
+ v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]bool)
+ fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]bool, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]interface{})
+ v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]interface{})
+ fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int8
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]string)
+ v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]string)
+ fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]string, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint)
+ v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint)
+ fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint8)
+ v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint8)
+ fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]uint8, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint16)
+ v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint16)
+ fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]uint16, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint32)
+ v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint32)
+ fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]uint32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint64)
+ v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint64)
+ fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uintptr)
+ v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uintptr)
+ fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int)
+ v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int)
+ fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int8)
+ v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int8)
+ fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]int8, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int16)
+ v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int16)
+ fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]int16, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int32)
+ v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int32)
+ fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]int32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int64)
+ v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int64)
+ fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]float32)
+ v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]float32)
+ fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]float32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]float64)
+ v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]float64)
+ fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]float64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]bool)
+ v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]bool)
+ fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]bool, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]interface{})
+ v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]interface{})
+ fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int16
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]string)
+ v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]string)
+ fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]string, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint)
+ v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint)
+ fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint8)
+ v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint8)
+ fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]uint8, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint16)
+ v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint16)
+ fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]uint16, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint32)
+ v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint32)
+ fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]uint32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint64)
+ v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint64)
+ fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uintptr)
+ v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uintptr)
+ fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int)
+ v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int)
+ fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int8)
+ v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int8)
+ fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]int8, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int16)
+ v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int16)
+ fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]int16, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int32)
+ v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int32)
+ fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]int32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int64)
+ v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int64)
+ fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]float32)
+ v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]float32)
+ fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]float32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]float64)
+ v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]float64)
+ fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]float64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]bool)
+ v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]bool)
+ fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]bool, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
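+// For interface{}-valued maps the loop may reuse the existing entry as the
+// decode target: mapGet is true only when neither MapValueReset nor
+// InterfaceReset is set on the handle, letting a previously stored value be
+// updated in place instead of being rebuilt from scratch.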
+func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]interface{})
+ v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]interface{})
+ fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]string)
+ v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]string)
+ fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]string, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint)
+ v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint)
+ fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint8)
+ v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint8)
+ fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]uint8, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint16)
+ v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint16)
+ fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]uint16, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint32)
+ v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint32)
+ fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]uint32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint64)
+ v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint64)
+ fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uintptr)
+ v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uintptr)
+ fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
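+// Platform-sized int and uint values are decoded with intBitsize and
+// uintBitsize instead of a literal bit count, so overflow checks track the
+// word size of the target architecture.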
+func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int)
+ v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int)
+ fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int8)
+ v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int8)
+ fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]int8, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int16)
+ v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int16)
+ fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]int16, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int32)
+ v, changed := fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int32)
+ fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]int32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int64)
+ v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int64)
+ fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]float32)
+ v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]float32)
+ fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]float32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]float64)
+ v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]float64)
+ fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]float64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]bool)
+ v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]bool)
+ fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]bool, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]interface{})
+ v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]interface{})
+ fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
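+// Illustrative sketch (assumes a CBOR-encoded []byte named data; the names
+// here are examples, not part of this file): none of these functions is
+// called directly. The Decoder dispatches through the fastpath tables when
+// the destination is a supported map type:
+//
+//    var m map[int64]string
+//    dec := codec.NewDecoderBytes(data, new(codec.CborHandle))
+//    err := dec.Decode(&m) // typically lands in DecMapInt64StringV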
+func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]string)
+ v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]string)
+ fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]string, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint)
+ v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint)
+ fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint8)
+ v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint8)
+ fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]uint8, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint16)
+ v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint16)
+ fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]uint16, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint32)
+ v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint32)
+ fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]uint32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint64)
+ v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint64)
+ fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uintptr)
+ v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uintptr)
+ fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int)
+ v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int)
+ fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int8)
+ v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int8)
+ fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]int8, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int16)
+ v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int16)
+ fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]int16, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int32)
+ v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int32)
+ fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]int32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int64)
+ v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int64)
+ fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]float32)
+ v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]float32)
+ fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]float32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]float64)
+ v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]float64)
+ fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]float64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]bool)
+ v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]bool)
+ fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]bool, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]interface{})
+ v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]interface{})
+ fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk bool
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
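Each map type in this generated file follows the same three-function shape: a ...R entry point that the decoder's function table invokes with a reflect.Value (taking the pointer form when the value is addressable, so a nil map can be allocated and stored back), a ...X helper that decodes through a typed pointer, and a ...V worker that performs the actual decode and reports whether it replaced the map. A minimal sketch of that write-back contract, using the map[bool]string worker defined just below (the decodeInto wrapper is hypothetical, for illustration only):

	func decodeInto(vp *map[bool]string, checkNil bool, d *Decoder) {
		// Mirror the X-helper pattern: dereference, run the value worker,
		// and store the result back only when the worker replaced the map.
		v, changed := fastpathTV.DecMapBoolStringV(*vp, checkNil, true, d)
		if changed {
			*vp = v
		}
	}

For interface-valued maps such as DecMapBoolIntfV above, the additional mapGet flag reuses the existing entry as the decode target unless the handle's MapValueReset/InterfaceReset options request a fresh value.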
+func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]string)
+ v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]string)
+ fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]string, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint)
+ v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint)
+ fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint8)
+ v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint8)
+ fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]uint8, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint16)
+ v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint16)
+ fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]uint16, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint32)
+ v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint32)
+ fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]uint32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint64)
+ v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint64)
+ fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uintptr)
+ v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uintptr)
+ fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uintptr, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int)
+ v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int)
+ fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int8)
+ v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int8)
+ fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]int8, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int16)
+ v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int16)
+ fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]int16, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int32)
+ v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int32)
+ fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]int32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int64)
+ v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int64)
+ fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]float32)
+ v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]float32)
+ fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]float32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]float64)
+ v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]float64)
+ fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]float64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]bool)
+ v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]bool)
+ fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]bool, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
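That closes out the generated fast-path map decoders. They are reached through the ordinary public API whenever the concrete type matches a generated entry; a minimal end-to-end sketch (assuming the upstream import path of this vendored package):

	package main

	import (
		"fmt"

		"github.com/ugorji/go/codec"
	)

	func main() {
		var h codec.MsgpackHandle
		in := map[bool]string{true: "yes", false: "no"}

		// Encoding map[bool]string dispatches through the fast-path type
		// switch instead of the generic reflection-driven path.
		var b []byte
		if err := codec.NewEncoderBytes(&b, &h).Encode(in); err != nil {
			panic(err)
		}

		// Decoding selects DecMapBoolStringV (defined above) the same way.
		var out map[bool]string
		if err := codec.NewDecoderBytes(b, &h).Decode(&out); err != nil {
			panic(err)
		}
		fmt.Println(out)
	}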
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 000000000..63e591145
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,34 @@
+// +build notfastpath
+
+package codec
+
+import "reflect"
+
+const fastpathEnabled = false
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This causes test execution, execution of small tools that use codec, etc.
+// to take a long time.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster build, test execution,
+// short-program runs, etc.
+
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
+
+type fastpathT struct{}
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+type fastpathA [0]fastpathE
+
+func (x fastpathA) index(rtid uintptr) int { return -1 }
+
+var fastpathAV fastpathA
+var fastpathTV fastpathT
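With these stubs compiled in (go build -tags notfastpath), every fast-path probe misses: fastpathEnabled is false, the type-switch functions return false, and the zero-length fastpathA table means index never finds an rtid, so callers continue down the generic reflection route. Roughly (the useFastpath wrapper is hypothetical, shown only to make the dispatch shape concrete):

	// Hypothetical call-site shape inside the codec package: probe the
	// fast path first; fall back to reflection-based decoding on a miss.
	func useFastpath(iv interface{}, d *Decoder) bool {
		if fastpathDecodeTypeSwitch(iv, d) {
			return true // handled by generated code (never, under notfastpath)
		}
		return false // caller proceeds with the generic reflect-driven decode
	}

Behavior is identical either way; the tag only trades decode speed for build time.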
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
new file mode 100644
index 000000000..eb0bdad35
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
@@ -0,0 +1,243 @@
+/* // +build ignore */
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by an MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
+// ************************************************************
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
+ return genHelperEncoder{e: e}, e.e
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
+ return genHelperDecoder{d: d}, d.d
+}
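These two constructors are the entire surface codecgen needs from the library; every generated method opens with the same prologue (emitted by genRequiredMethodVars in gen.go, later in this diff). A condensed sketch of that generated shape, with illustrative type and suffix names (MyType, codecSelfer1234; codec1978 is the import alias the generator assigns to this package):

	func (x *MyType) CodecEncodeSelf(e *codec1978.Encoder) {
		var h codecSelfer1234                 // per-file empty helper struct emitted by codecgen
		z, r := codec1978.GenHelperEncoder(e) // z: genHelperEncoder, r: encDriver
		_, _, _ = h, z, r
		// ...statically generated, field-by-field encoding uses z and r...
	}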
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ d *Decoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // println(">>>>>>>>> EncFallback")
+ f.e.encodeI(iv, false, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) {
+ f.e.raw(iv)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.e.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) HasExtensions() bool {
+ return len(f.e.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v)
+ if rt.Kind() == reflect.Ptr {
+ rt = rt.Elem()
+ }
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.e.h.getExt(rtid); xfFn != nil {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+ return true
+ }
+ return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncSendContainerState(c containerState) {
+ if f.e.cr != nil {
+ f.e.cr.sendContainerState(c)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() {
+ f.d.swallow()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+ return f.d.b[:]
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ // println(">>>>>>>>> DecFallback")
+ f.d.decodeI(iv, chkPtr, false, false, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ // bs := f.dd.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecRaw() []byte {
+ return f.d.raw()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.d.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) HasExtensions() bool {
+ return len(f.d.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v).Elem()
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.d.h.getExt(rtid); xfFn != nil {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+ return true
+ }
+ return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSendContainerState(c containerState) {
+ if f.d.cr != nil {
+ f.d.cr.sendContainerState(c)
+ }
+}
diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go
new file mode 100644
index 000000000..2ace97b78
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.generated.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by an MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = `
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+ {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} > 0 {
+for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} else if {{var "l"}} < 0 {
+for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true {{ end }}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} // else len==0: TODO: Should we clear map entries?
+z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
+`
+
+const genDecListTmpl = `
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{end}} {{if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+} else if {{var "l"}} > 0 {
+ {{if isChan }}if {{var "v"}} == nil {
+ {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
+ {{var "c"}} = true
+ }
+ for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
+ {{var "h"}}.ElemContainerState({{var "r"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ }
+ {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
+ var {{var "rt"}} bool {{/* truncated */}}
+ _, _ = {{var "rl"}}, {{var "rt"}}
+ {{var "rr"}} = {{var "l"}} // len({{var "v"}})
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
+ {{ else }}{{if not .Immutable }}
+ {{var "rg"}} := len({{var "v"}}) > 0
+ {{var "v2"}} := {{var "v"}} {{end}}
+ {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rt"}} {
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
+ if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
+ } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ } {{end}} {{/* end isSlice:47 */}}
+ {{var "j"}} := 0
+ for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ z.DecSwallow()
+ }
+ {{ else }}if {{var "rt"}} {
+ for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "v"}} = append({{var "v"}}, {{ zero}})
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ } {{end}} {{/* end isArray:56 */}}
+ {{end}} {{/* end isChan:16 */}}
+} else { {{/* len < 0 */}}
+ {{var "j"}} := 0
+ for ; !r.CheckBreak(); {{var "j"}}++ {
+ {{if isChan }}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ {{ else }}
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
+ {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
+ {{var "c"}} = true {{end}}
+ }
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ if {{var "j"}} < len({{var "v"}}) {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ } else {
+ z.DecSwallow()
+ }
+ {{end}}
+ }
+ {{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ }{{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}{{end}}
+`
+
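These templates expand into the same shape as the fast-path map and list decoders earlier in this diff. A rough, hand-simplified rendering of genDecMapTmpl for a map[string]int field (the yy-prefixed temp variables follow gen.go's genTempVarPfx convention; this expansion is illustrative, not actual generator output):

	yyv1 := *x.M
	yyl1 := r.ReadMapStart()
	yybh1 := z.DecBasicHandle()
	if yyv1 == nil {
		yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 10)
		yyv1 = make(map[string]int, yyrl1)
		*x.M = yyv1
	}
	// ...the key/value loops then mirror the length-prefixed (yyl1 > 0) and
	// CheckBreak-terminated (yyl1 < 0) branches seen in the fast path above...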
diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go
new file mode 100644
index 000000000..c4944dbff
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.go
@@ -0,0 +1,2014 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by an MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+// - RawExt
+// - Raw
+// - Builtins
+// - Extensions
+// - (Binary|Text|JSON)(Unm|M)arshal
+// - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+// - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+// - Canonical option. (codecgen IGNORES it currently)
+// This is just because it has not been implemented.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+// array: [n]T
+// slice: []T
+// map: map[K]V
+// primitive: [u]int[n], float(32|64), bool, string
+// struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this would cause a circular reference: (En|De)code will call back into the Selfer methods.
+// Any type that implements Selfer must implement it completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// The codec framework is very feature-rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to the runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't function-call overhead just to discover that we should not enter a block of code.
+
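For reference, the Selfer precedence described above hinges on the package's Selfer interface (CodecEncodeSelf/CodecDecodeSelf); codecgen's job is to write those two methods statically. A minimal hand-written equivalent for a hypothetical Point type (a sketch only; real generated code also emits the container-state calls shown in gen-helper above):

	type Point struct{ X, Y int }

	// Encode as a fixed two-element array. Note that we encode a different
	// value ([2]int), not the Point itself, which per the note above would
	// recurse back into CodecEncodeSelf.
	func (p *Point) CodecEncodeSelf(e *Encoder) {
		e.MustEncode([2]int{p.X, p.Y})
	}

	func (p *Point) CodecDecodeSelf(d *Decoder) {
		var a [2]int
		d.MustDecode(&a)
		p.X, p.Y = a[0], a[1]
	}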
+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+// - helper methods change (signature change, new ones added, some removed, etc)
+// - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+const GenVersion = 5
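Gen (below) stamps a guard into every file it writes so that a stale generated file fails fast at init time instead of misbehaving. The emitted check looks roughly like this (the codec1978 alias and the frozen version number are illustrative; compare the x.line calls in Gen's init block):

	func init() {
		if codec1978.GenVersion != 5 { // 5 = GenVersion at generation time
			_, file, _, _ := runtime.Caller(0)
			err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
				5, codec1978.GenVersion, file)
			panic(err)
		}
	}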
+
+const (
+ genCodecPkg = "codec1978"
+ genTempVarPfx = "yy"
+ genTopLevelVarName = "x"
+
+ // ignore canBeNil parameter, and always set to true.
+ // This is because nil can appear anywhere, so we should always check.
+ genAnythingCanBeNil = true
+
+ // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+ // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+ // are not executed a lot.
+ //
+ // From testing, it didn't make much difference in runtime, so keep as true (one function only)
+ genUseOneFunctionForDecStructMap = true
+)
+
+type genStructMapStyle uint8
+
+const (
+ genStructMapStyleConsolidated genStructMapStyle = iota
+ genStructMapStyleLenPrefix
+ genStructMapStyleCheckBreak
+)
+
+var (
+ genAllTypesSamePkgErr = errors.New("All types must be in the same package")
+ genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
+ genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+ genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
+ genCheckVendor bool
+)
+
+// genRunner holds some state used during a Gen run.
+type genRunner struct {
+ w io.Writer // output
+ c uint64 // counter used for generating varsfx
+ t []reflect.Type // list of types to run selfer on
+
+ tc reflect.Type // currently running selfer on this type
+ te map[uintptr]bool // types for which the encoder has been created
+ td map[uintptr]bool // types for which the decoder has been created
+ cp string // codec import path
+
+ im map[string]reflect.Type // imports to add
+ imn map[string]string // package names of imports to add
+ imc uint64 // counter for import numbers
+
+ is map[reflect.Type]struct{} // types seen during import search
+	bp string // base PkgPath that we are generating for
+
+ cpfx string // codec package prefix
+	unsafe bool // whether unsafe may be used in generated code
+
+ tm map[reflect.Type]struct{} // types for which enc/dec must be generated
+ ts []reflect.Type // types for which enc/dec must be generated
+
+ xs string // top level variable/constant suffix
+ hn string // fn helper type name
+
+ ti *TypeInfos
+ // rr *rand.Rand // random generator for file-specific types
+}
+
+// Gen will write a complete Go file containing Selfer implementations for each
+// type passed. All the types must be in the same package.
+//
+// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.*
+func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
+	// None of the types passed to this method implement codec.Selfer directly;
+	// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
+	// Consequently, there's no need to check for and trim them here.
+
+ if len(typ) == 0 {
+ return
+ }
+ x := genRunner{
+ unsafe: useUnsafe,
+ w: w,
+ t: typ,
+ te: make(map[uintptr]bool),
+ td: make(map[uintptr]bool),
+ im: make(map[string]reflect.Type),
+ imn: make(map[string]string),
+ is: make(map[reflect.Type]struct{}),
+ tm: make(map[reflect.Type]struct{}),
+ ts: []reflect.Type{},
+ bp: genImportPath(typ[0]),
+ xs: uid,
+ ti: ti,
+ }
+ if x.ti == nil {
+ x.ti = defTypeInfos
+ }
+ if x.xs == "" {
+ rr := rand.New(rand.NewSource(time.Now().UnixNano()))
+ x.xs = strconv.FormatInt(rr.Int63n(9999), 10)
+ }
+
+ // gather imports first:
+ x.cp = genImportPath(reflect.TypeOf(x))
+ x.imn[x.cp] = genCodecPkg
+ for _, t := range typ {
+ // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+ if genImportPath(t) != x.bp {
+ panic(genAllTypesSamePkgErr)
+ }
+ x.genRefPkgs(t)
+ }
+ if buildTags != "" {
+ x.line("// +build " + buildTags)
+ x.line("")
+ }
+ x.line(`
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+`)
+ x.line("package " + pkgName)
+ x.line("")
+ x.line("import (")
+ if x.cp != x.bp {
+ x.cpfx = genCodecPkg + "."
+ x.linef("%s \"%s\"", genCodecPkg, x.cp)
+ }
+ // use a sorted set of im keys, so that we can get consistent output
+ imKeys := make([]string, 0, len(x.im))
+	for k := range x.im {
+ imKeys = append(imKeys, k)
+ }
+ sort.Strings(imKeys)
+ for _, k := range imKeys { // for k, _ := range x.im {
+ x.linef("%s \"%s\"", x.imn[k], k)
+ }
+ // add required packages
+ for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} {
+ if _, ok := x.im[k]; !ok {
+ if k == "unsafe" && !x.unsafe {
+ continue
+ }
+ x.line("\"" + k + "\"")
+ }
+ }
+ x.line(")")
+ x.line("")
+
+ x.line("const (")
+ x.linef("// ----- content types ----")
+ x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8))
+ x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW))
+ x.linef("// ----- value types used ----")
+ x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray))
+ x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap))
+ x.linef("// ----- containerStateValues ----")
+ x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey))
+ x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue))
+ x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd))
+ x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem))
+ x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd))
+ x.line(")")
+ x.line("var (")
+ x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())")
+ x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)")
+ x.line(")")
+ x.line("")
+
+ if x.unsafe {
+ x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}")
+ x.line("")
+ }
+ x.hn = "codecSelfer" + x.xs
+ x.line("type " + x.hn + " struct{}")
+ x.line("")
+
+ x.varsfxreset()
+ x.line("func init() {")
+ x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion)
+ x.line("_, file, _, _ := runtime.Caller(0)")
+ x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
+ x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx)
+ x.line("panic(err)")
+ x.linef("}")
+ x.line("if false { // reference the types, but skip this branch at build/run time")
+ var n int
+ // for k, t := range x.im {
+ for _, k := range imKeys {
+ t := x.im[k]
+ x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
+ n++
+ }
+ if x.unsafe {
+ x.linef("var v%v unsafe.Pointer", n)
+ n++
+ }
+ if n > 0 {
+ x.out("_")
+ for i := 1; i < n; i++ {
+ x.out(", _")
+ }
+ x.out(" = v0")
+ for i := 1; i < n; i++ {
+ x.outf(", v%v", i)
+ }
+ }
+ x.line("} ") // close if false
+ x.line("}") // close init
+ x.line("")
+
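+	// To illustrate (with hypothetical alias and version values), the init
+	// function emitted above looks roughly like:
+	//
+	//	func init() {
+	//		if codec1978.GenVersion != 8 { ... panic(err) ... }
+	//		if false { // reference the types, but skip this branch at build/run time
+	//			var v0 pkg1_extpkg.ExtType
+	//			_ = v0
+	//		}
+	//	}
+	//
+	// The dead "if false" branch only references the aliased imports, so the
+	// generated file compiles even when an import is otherwise unused.
+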
+ // generate rest of type info
+ for _, t := range typ {
+ x.tc = t
+ x.selfer(true)
+ x.selfer(false)
+ }
+
+ for _, t := range x.ts {
+ rtid := reflect.ValueOf(t).Pointer()
+ // generate enc functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(true)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.encListFallback("v", t)
+ case reflect.Map:
+ x.encMapFallback("v", t)
+ default:
+ panic(genExpectArrayOrMapErr)
+ }
+ x.line("}")
+ x.line("")
+
+ // generate dec functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(false)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.decListFallback("v", rtid, t)
+ case reflect.Map:
+ x.decMapFallback("v", rtid, t)
+ default:
+ panic(genExpectArrayOrMapErr)
+ }
+ x.line("}")
+ x.line("")
+ }
+
+ x.line("")
+}
+
+func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool {
+ // return varname != genTopLevelVarName && t != x.tc
+ // the only time we checkForSelfer is if we are not at the TOP of the generated code.
+ return varname != genTopLevelVarName
+}
+
+func (x *genRunner) arr2str(t reflect.Type, s string) string {
+ if t.Kind() == reflect.Array {
+ return s
+ }
+ return ""
+}
+
+func (x *genRunner) genRequiredMethodVars(encode bool) {
+ x.line("var h " + x.hn)
+ if encode {
+ x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)")
+ } else {
+ x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)")
+ }
+ x.line("_, _, _ = h, z, r")
+}
+
+func (x *genRunner) genRefPkgs(t reflect.Type) {
+ if _, ok := x.is[t]; ok {
+ return
+ }
+ // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+ x.is[t] = struct{}{}
+ tpkg, tname := genImportPath(t), t.Name()
+ if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' {
+ if _, ok := x.im[tpkg]; !ok {
+ x.im[tpkg] = t
+ if idx := strings.LastIndex(tpkg, "/"); idx < 0 {
+ x.imn[tpkg] = tpkg
+ } else {
+ x.imc++
+ x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan:
+ x.genRefPkgs(t.Elem())
+ case reflect.Map:
+ x.genRefPkgs(t.Elem())
+ x.genRefPkgs(t.Key())
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' {
+ x.genRefPkgs(t.Field(i).Type)
+ }
+ }
+ }
+}
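+
+// As an example of the aliasing above: if a generated type references a type
+// from an external package, genRefPkgs records that package in x.im and gives
+// it an alias via x.imn. A first external import of a hypothetical
+// "example.com/ext/extpkg" would be aliased "pkg1_extpkg" (counter from
+// x.imc, suffix from genGoIdentifier).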
+
+func (x *genRunner) line(s string) {
+ x.out(s)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ x.out("\n")
+ }
+}
+
+func (x *genRunner) varsfx() string {
+ x.c++
+ return strconv.FormatUint(x.c, 10)
+}
+
+func (x *genRunner) varsfxreset() {
+ x.c = 0
+}
+
+func (x *genRunner) out(s string) {
+ if _, err := io.WriteString(x.w, s); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) linef(s string, params ...interface{}) {
+ x.line(fmt.Sprintf(s, params...))
+}
+
+func (x *genRunner) outf(s string, params ...interface{}) {
+ x.out(fmt.Sprintf(s, params...))
+}
+
+func (x *genRunner) genTypeName(t reflect.Type) (n string) {
+ // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }()
+
+	// if the type has a PkgPath which doesn't match the current package,
+	// then include it.
+	// We cannot depend on t.String() because it includes the current package,
+	// nor on t.PkgPath because it includes the full import path.
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "*"
+ t = t.Elem()
+ }
+ if tn := t.Name(); tn != "" {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
+ case reflect.Slice:
+ return ptrPfx + "[]" + x.genTypeName(t.Elem())
+ case reflect.Array:
+ return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
+ case reflect.Chan:
+ return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
+ default:
+ if t == intfTyp {
+ return ptrPfx + "interface{}"
+ } else {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ }
+}
+
+func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
+ if t.Name() == "" {
+ return t.String()
+ } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
+ return t.Name()
+ } else {
+ return x.imn[genImportPath(t)] + "." + t.Name()
+ // return t.String() // best way to get the package name inclusive
+ }
+}
+
+func (x *genRunner) genZeroValueR(t reflect.Type) string {
+ // if t is a named type, w
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
+ reflect.Slice, reflect.Map, reflect.Invalid:
+ return "nil"
+ case reflect.Bool:
+ return "false"
+ case reflect.String:
+ return `""`
+ case reflect.Struct, reflect.Array:
+ return x.genTypeName(t) + "{}"
+ default: // all numbers
+ return "0"
+ }
+}
+
+func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
+ return genMethodNameT(t, x.tc)
+}
+
+func (x *genRunner) selfer(encode bool) {
+ t := x.tc
+ t0 := t
+ // always make decode use a pointer receiver,
+ // and structs always use a ptr receiver (encode|decode)
+ isptr := !encode || t.Kind() == reflect.Struct
+ x.varsfxreset()
+ fnSigPfx := "func (x "
+ if isptr {
+ fnSigPfx += "*"
+ }
+ fnSigPfx += x.genTypeName(t)
+
+ x.out(fnSigPfx)
+ if isptr {
+ t = reflect.PtrTo(t)
+ }
+ if encode {
+ x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
+ x.genRequiredMethodVars(true)
+ // x.enc(genTopLevelVarName, t)
+ x.encVar(genTopLevelVarName, t)
+ } else {
+ x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ // do not use decVar, as there is no need to check TryDecodeAsNil
+ // or way to elegantly handle that, and also setting it to a
+ // non-nil value doesn't affect the pointer passed.
+ // x.decVar(genTopLevelVarName, t, false)
+ x.dec(genTopLevelVarName, t0)
+ }
+ x.line("}")
+ x.line("")
+
+ if encode || t0.Kind() != reflect.Struct {
+ return
+ }
+
+	// write decode-from-map methods
+ if genUseOneFunctionForDecStructMap {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated)
+ x.line("}")
+ x.line("")
+ } else {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix)
+ x.line("}")
+ x.line("")
+
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak)
+ x.line("}")
+ x.line("")
+ }
+
+ // write containerArray
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0)
+ x.line("}")
+ x.line("")
+
+}
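+
+// For a hypothetical struct type T in the target package, selfer emits method
+// signatures like the following (with the codec package aliased, e.g. as
+// "codec1978"):
+//
+//	func (x *T) CodecEncodeSelf(e *codec1978.Encoder)
+//	func (x *T) CodecDecodeSelf(d *codec1978.Decoder)
+//	func (x *T) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
+//	func (x *T) codecDecodeSelfFromArray(l int, d *codec1978.Decoder)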
+
+// used for chan, array, slice, map
+func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) {
+ if encode {
+ x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname)
+ } else {
+ x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname)
+ }
+ x.registerXtraT(t)
+}
+
+func (x *genRunner) registerXtraT(t reflect.Type) {
+ // recursively register the types
+ if _, ok := x.tm[t]; ok {
+ return
+ }
+ var tkey reflect.Type
+ switch t.Kind() {
+ case reflect.Chan, reflect.Slice, reflect.Array:
+ case reflect.Map:
+ tkey = t.Key()
+ default:
+ return
+ }
+ x.tm[t] = struct{}{}
+ x.ts = append(x.ts, t)
+	// check if this refers to any xtra types, e.g. a slice of arrays: add the array type
+ x.registerXtraT(t.Elem())
+ if tkey != nil {
+ x.registerXtraT(tkey)
+ }
+}
+
+// encVar will encode a variable.
+// The parameter t is the reflect.Type of the variable itself.
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+ // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
+ var checkNil bool
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+ checkNil = true
+ }
+ if checkNil {
+ x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+ }
+ switch t.Kind() {
+ case reflect.Ptr:
+ switch t.Elem().Kind() {
+ case reflect.Struct, reflect.Array:
+ x.enc(varname, genNonPtr(t))
+ default:
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := *" + varname)
+ x.enc(genTempVarPfx+i, genNonPtr(t))
+ }
+ case reflect.Struct, reflect.Array:
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := &" + varname)
+ x.enc(genTempVarPfx+i, t)
+ default:
+ x.enc(varname, t)
+ }
+
+ if checkNil {
+ x.line("}")
+ }
+
+}
+
+// enc will encode a variable (varname) of type t,
+// except when t is of kind reflect.Struct or reflect.Array, in which case varname is of type ptrTo(T) (to prevent copying).
+func (x *genRunner) enc(varname string, t reflect.Type) {
+ rtid := reflect.ValueOf(t).Pointer()
+	// We call CodecEncodeSelf if one of the following holds:
+ // - the type already implements Selfer, call that
+ // - the type has a Selfer implementation just created, use that
+ // - the type is in the list of the ones we will generate for, but it is not currently being generated
+
+ mi := x.varsfx()
+ tptr := reflect.PtrTo(t)
+ tk := t.Kind()
+ if x.checkForSelfer(t, varname) {
+ if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+ if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ } else { // varname is of type T
+ if t.Implements(selferTyp) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ } else if tptr.Implements(selferTyp) {
+ x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
+ x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
+ return
+ }
+ }
+
+ if _, ok := x.te[rtid]; ok {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.te[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is RawExt, Raw
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+ x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
+ x.linef("_ = %sm%s", genTempVarPfx, mi)
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ if t == rawTyp {
+ x.linef("} else { z.EncRaw(%v)", varname)
+ return
+ }
+ if t == rawExtTyp {
+ x.linef("} else { r.EncodeRawExt(%v, e)", varname)
+ return
+ }
+ // HACK: Support for Builtins.
+ // Currently, only Binc supports builtins, and the only builtin type is time.Time.
+ // Have a method that returns the rtid for time.Time if Handle is Binc.
+ if t == timeTyp {
+ vrtid := genTempVarPfx + "m" + x.varsfx()
+ x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+ x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname)
+ }
+ // only check for extensions if the type is named, and has a packagePath.
+ if genImportPath(t) != "" && t.Name() != "" {
+	// first check if extensions are configured, before doing the interface conversion
+ x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
+ }
+ if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+ if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ } else { // varname is of type T
+ if t.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname)
+ } else if t.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname)
+ }
+ }
+ x.line("} else {")
+
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(int64(" + varname + "))")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(uint64(" + varname + "))")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(float32(" + varname + "))")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(float64(" + varname + "))")
+ case reflect.Bool:
+ x.line("r.EncodeBool(bool(" + varname + "))")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))")
+ case reflect.Chan:
+ x.xtraSM(varname, true, t)
+ // x.encListFallback(varname, rtid, t)
+ case reflect.Array:
+ x.xtraSM(varname, true, t)
+ case reflect.Slice:
+ // if nil, call dedicated function
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+ } else {
+ x.xtraSM(varname, true, t)
+ // x.encListFallback(varname, rtid, t)
+ }
+ case reflect.Map:
+ // if nil, call dedicated function
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+ } else {
+ x.xtraSM(varname, true, t)
+ // x.encMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if !inlist {
+ delete(x.te, rtid)
+ x.line("z.EncFallback(" + varname + ")")
+ break
+ }
+ x.encStruct(varname, rtid, t)
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.EncFallback(" + varname + ")")
+ }
+}
+
+func (x *genRunner) encZero(t reflect.Type) {
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(0)")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(0)")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(0)")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(0)")
+ case reflect.Bool:
+ x.line("r.EncodeBool(false)")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`)
+ default:
+ x.line("r.EncodeNil()")
+ }
+}
+
+func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
+	// Use knowledge from structfieldinfo (mbs, encodable fields; ignore omitempty).
+	// replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
+
+	// if t == the type we are currently running the selfer on, do for all
+ ti := x.ti.get(rtid, t)
+ i := x.varsfx()
+ sepVarname := genTempVarPfx + "sep" + i
+ numfieldsvar := genTempVarPfx + "q" + i
+ ti2arrayvar := genTempVarPfx + "r" + i
+ struct2arrvar := genTempVarPfx + "2arr" + i
+
+ x.line(sepVarname + " := !z.EncBinary()")
+ x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ // due to omitEmpty, we need to calculate the
+ // number of non-empty things we write out first.
+ // This is required as we need to pre-determine the size of the container,
+ // to support length-prefixing.
+ x.linef("var %s [%v]bool", numfieldsvar, len(tisfi))
+ x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar)
+ x.linef("const %s bool = %v", ti2arrayvar, ti.toArray)
+ nn := 0
+ for j, si := range tisfi {
+ if !si.omitEmpty {
+ nn++
+ continue
+ }
+ var t2 reflect.StructField
+ var omitline string
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ omitline += varname3 + " != nil && "
+ }
+ }
+ }
+ // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
+	// also, for maps/slices/arrays, check if len != 0 (not if == zero value)
+ switch t2.Type.Kind() {
+ case reflect.Struct:
+ omitline += " true"
+ case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
+ omitline += "len(" + varname + "." + t2.Name + ") != 0"
+ default:
+ omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type)
+ }
+ x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
+ }
+ x.linef("var %snn%s int", genTempVarPfx, i)
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+ x.linef("} else {") // if not ti.toArray
+ x.linef("%snn%s = %v", genTempVarPfx, i, nn)
+ x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
+ x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i)
+ x.linef("%snn%s = %v", genTempVarPfx, i, 0)
+ // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+ x.line("}") // close if not StructToArray
+
+ for j, si := range tisfi {
+ i := x.varsfx()
+ isNilVarName := genTempVarPfx + "n" + i
+ var labelUsed bool
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix)
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ if !labelUsed {
+ x.line("var " + isNilVarName + " bool")
+ }
+ x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ")
+ x.line("goto LABEL" + i)
+ x.line("}")
+ labelUsed = true
+ // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
+ }
+ }
+ // t2 = t.FieldByIndex(si.is)
+ }
+ if labelUsed {
+ x.line("LABEL" + i + ":")
+ }
+ // if the type of the field is a Selfer, or one of the ones
+
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
+ if labelUsed {
+ x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+ }
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ if si.omitEmpty {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ if si.omitEmpty {
+ x.linef("} else {")
+ x.encZero(t2.Type)
+ x.linef("}")
+ }
+ if labelUsed {
+ x.line("}")
+ }
+
+ x.linef("} else {") // if not ti.toArray
+
+ if si.omitEmpty {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ if labelUsed {
+ x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ x.line("}")
+ } else {
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ }
+ if si.omitEmpty {
+ x.line("}")
+ }
+ x.linef("} ") // end if/else ti.toArray
+ }
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+ x.line("} else {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+ x.line("}")
+
+}
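+
+// To illustrate: for a hypothetical struct with fields A and B, where B is
+// tagged omitempty, encStruct emits code shaped like (temp-variable names
+// depend on genTempVarPfx, shown here as "yy"):
+//
+//	var yyq2 [2]bool   // per-field "non-empty" flags (computed for omitempty fields only)
+//	yyq2[1] = x.B != 0 // omitempty check for B
+//
+// In map mode the set flags are counted to pre-size the map and guard each
+// key/value write; in array mode every position is written, with a zero value
+// emitted when an omitempty flag is unset.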
+
+func (x *genRunner) encListFallback(varname string, t reflect.Type) {
+ if t.AssignableTo(uint8SliceTyp) {
+ x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname)
+ return
+ }
+ if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
+ x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname)
+ return
+ }
+ i := x.varsfx()
+ g := genTempVarPfx
+ x.line("r.EncodeArrayStart(len(" + varname + "))")
+ if t.Kind() == reflect.Chan {
+ x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.linef("%sv%s := <-%s", g, i, varname)
+ } else {
+ // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ }
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+}
+
+func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
+	// TODO: expand this to handle the canonical (sorted keys) option.
+ i := x.varsfx()
+ x.line("r.EncodeMapStart(len(" + varname + "))")
+ x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.encVar(genTempVarPfx+"k"+i, t.Key())
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
+
+func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
+	// We only decode as nil if the value is nillable.
+	// This removes some of the wasted checks for TryDecodeAsNil.
+	// We need to think about this more, to see what happens if omitempty, etc.
+	// cause a nil value to be stored when something is expected.
+	// This could happen when decoding from a struct encoded as an array.
+	// For that, decVar should be called with canBeNil=true, to force true as its value.
+ i := x.varsfx()
+ if !canBeNil {
+ canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
+ }
+ if canBeNil {
+ x.line("if r.TryDecodeAsNil() {")
+ if t.Kind() == reflect.Ptr {
+ x.line("if " + varname + " != nil { ")
+
+ // if varname is a field of a struct (has a dot in it),
+ // then just set it to nil
+ if strings.IndexByte(varname, '.') != -1 {
+ x.line(varname + " = nil")
+ } else {
+ x.line("*" + varname + " = " + x.genZeroValueR(t.Elem()))
+ }
+ x.line("}")
+ } else {
+ x.line(varname + " = " + x.genZeroValueR(t))
+ }
+ x.line("} else {")
+ } else {
+ x.line("// cannot be nil")
+ }
+ if t.Kind() != reflect.Ptr {
+ if x.decTryAssignPrimitive(varname, t) {
+ x.line(genTempVarPfx + "v" + i + " := &" + varname)
+ x.dec(genTempVarPfx+"v"+i, t)
+ }
+ } else {
+ x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
+	// Ensure we set the underlying ptr to a non-nil value (so we can dereference it later).
+ // There's a chance of a **T in here which is nil.
+ var ptrPfx string
+ for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() {
+ ptrPfx += "*"
+ x.linef("if %s%s == nil { %s%s = new(%s)}",
+ ptrPfx, varname, ptrPfx, varname, x.genTypeName(t))
+ }
+	// if varname contains '[', create a temp variable to hold this pointer
+	if strings.Contains(varname, "[") {
+ varname2 := genTempVarPfx + "w" + i
+ x.line(varname2 + " := " + varname)
+ varname = varname2
+ }
+
+ if ptrPfx == "" {
+ x.dec(varname, t)
+ } else {
+ x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname)
+ x.dec(genTempVarPfx+"z"+i, t)
+ }
+
+ }
+
+ if canBeNil {
+ x.line("} ")
+ }
+}
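+
+// As an example: for a hypothetical variable v of type *int, decVar emits
+//
+//	if v == nil { v = new(int) }
+//
+// before decoding into it; for deeper pointer chains (**T and beyond) it
+// walks each level with an extra "*" prefix so the final dereference target
+// is non-nil before dec(...) runs.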
+
+// dec will decode a variable (varname) of type ptrTo(t).
+// t is always a basetype (i.e. not of kind reflect.Ptr).
+func (x *genRunner) dec(varname string, t reflect.Type) {
+ // assumptions:
+	// - varname already refers to a pointer; no need to take its address.
+ // - t is always a baseType T (not a *T, etc).
+ rtid := reflect.ValueOf(t).Pointer()
+ tptr := reflect.PtrTo(t)
+ if x.checkForSelfer(t, varname) {
+ if t.Implements(selferTyp) || tptr.Implements(selferTyp) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ if _, ok := x.td[rtid]; ok {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.td[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is Raw, RawExt
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+ mi := x.varsfx()
+ x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
+ x.linef("_ = %sm%s", genTempVarPfx, mi)
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ if t == rawTyp {
+ x.linef("} else { *%v = z.DecRaw()", varname)
+ return
+ }
+ if t == rawExtTyp {
+ x.linef("} else { r.DecodeExt(%v, 0, nil)", varname)
+ return
+ }
+
+ // HACK: Support for Builtins.
+ // Currently, only Binc supports builtins, and the only builtin type is time.Time.
+ // Have a method that returns the rtid for time.Time if Handle is Binc.
+ if t == timeTyp {
+ vrtid := genTempVarPfx + "m" + x.varsfx()
+ x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+ x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname)
+ }
+ // only check for extensions if the type is named, and has a packagePath.
+ if genImportPath(t) != "" && t.Name() != "" {
+	// first check if extensions are configured, before doing the interface conversion
+ x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+ }
+
+ if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+ x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname)
+ } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+ x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname)
+ }
+
+ x.line("} else {")
+
+ // Since these are pointers, we cannot share, and have to use them one by one
+ switch t.Kind() {
+ case reflect.Int:
+ x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))")
+ // x.line("z.DecInt((*int)(" + varname + "))")
+ case reflect.Int8:
+ x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))")
+ // x.line("z.DecInt8((*int8)(" + varname + "))")
+ case reflect.Int16:
+ x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))")
+ // x.line("z.DecInt16((*int16)(" + varname + "))")
+ case reflect.Int32:
+ x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))")
+ // x.line("z.DecInt32((*int32)(" + varname + "))")
+ case reflect.Int64:
+ x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))")
+ // x.line("z.DecInt64((*int64)(" + varname + "))")
+
+ case reflect.Uint:
+ x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+ // x.line("z.DecUint((*uint)(" + varname + "))")
+ case reflect.Uint8:
+ x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))")
+ // x.line("z.DecUint8((*uint8)(" + varname + "))")
+ case reflect.Uint16:
+ x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))")
+ //x.line("z.DecUint16((*uint16)(" + varname + "))")
+ case reflect.Uint32:
+ x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))")
+ //x.line("z.DecUint32((*uint32)(" + varname + "))")
+ case reflect.Uint64:
+ x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))")
+ //x.line("z.DecUint64((*uint64)(" + varname + "))")
+ case reflect.Uintptr:
+ x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+
+ case reflect.Float32:
+ x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))")
+ //x.line("z.DecFloat32((*float32)(" + varname + "))")
+ case reflect.Float64:
+ x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))")
+ // x.line("z.DecFloat64((*float64)(" + varname + "))")
+
+ case reflect.Bool:
+ x.line("*((*bool)(" + varname + ")) = r.DecodeBool()")
+ // x.line("z.DecBool((*bool)(" + varname + "))")
+ case reflect.String:
+ x.line("*((*string)(" + varname + ")) = r.DecodeString()")
+ // x.line("z.DecString((*string)(" + varname + "))")
+ case reflect.Array, reflect.Chan:
+ x.xtraSM(varname, false, t)
+ // x.decListFallback(varname, rtid, true, t)
+ case reflect.Slice:
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+ } else {
+ x.xtraSM(varname, false, t)
+ // x.decListFallback(varname, rtid, false, t)
+ }
+ case reflect.Map:
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+ } else {
+ x.xtraSM(varname, false, t)
+ // x.decMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if inlist {
+ x.decStruct(varname, rtid, t)
+ } else {
+ // delete(x.td, rtid)
+ x.line("z.DecFallback(" + varname + ", false)")
+ }
+ default:
+ if rtidAdded {
+			delete(x.td, rtid)
+ }
+ x.line("z.DecFallback(" + varname + ", true)")
+ }
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) {
+	// This should only be used for exact primitives (i.e. un-named types).
+	// Named types may be implementations of Selfer, Unmarshaler, etc.
+	// They should be handled by dec(...).
+
+ if t.Name() != "" {
+ tryAsPtr = true
+ return
+ }
+
+ switch t.Kind() {
+ case reflect.Int:
+ x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs)
+ case reflect.Int8:
+ x.linef("%s = r.DecodeInt(8)", varname)
+ case reflect.Int16:
+ x.linef("%s = r.DecodeInt(16)", varname)
+ case reflect.Int32:
+ x.linef("%s = r.DecodeInt(32)", varname)
+ case reflect.Int64:
+ x.linef("%s = r.DecodeInt(64)", varname)
+
+ case reflect.Uint:
+ x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs)
+ case reflect.Uint8:
+ x.linef("%s = r.DecodeUint(8)", varname)
+ case reflect.Uint16:
+ x.linef("%s = r.DecodeUint(16)", varname)
+ case reflect.Uint32:
+ x.linef("%s = r.DecodeUint(32)", varname)
+ case reflect.Uint64:
+ x.linef("%s = r.DecodeUint(64)", varname)
+ case reflect.Uintptr:
+ x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs)
+
+ case reflect.Float32:
+ x.linef("%s = r.DecodeFloat(true)", varname)
+ case reflect.Float64:
+ x.linef("%s = r.DecodeFloat(false)", varname)
+
+ case reflect.Bool:
+ x.linef("%s = r.DecodeBool()", varname)
+ case reflect.String:
+ x.linef("%s = r.DecodeString()", varname)
+ default:
+ tryAsPtr = true
+ }
+ return
+}
+
+func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
+ if t.AssignableTo(uint8SliceTyp) {
+ x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)")
+ return
+ }
+ if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
+ x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname)
+ return
+ }
+ type tstruc struct {
+ TempVar string
+ Rand string
+ Varname string
+ CTyp string
+ Typ string
+ Immutable bool
+ Size int
+ }
+ telem := t.Elem()
+ ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}
+
+ funcs := make(template.FuncMap)
+
+ funcs["decLineVar"] = func(varname string) string {
+ x.decVar(varname, telem, false)
+ return ""
+ }
+ funcs["decLine"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+ funcs["zero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["isArray"] = func() bool {
+ return t.Kind() == reflect.Array
+ }
+ funcs["isSlice"] = func() bool {
+ return t.Kind() == reflect.Slice
+ }
+ funcs["isChan"] = func() bool {
+ return t.Kind() == reflect.Chan
+ }
+ tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
+ type tstruc struct {
+ TempVar string
+ Sfx string
+ Rand string
+ Varname string
+ KTyp string
+ Typ string
+ Size int
+ }
+ telem := t.Elem()
+ tkey := t.Key()
+ ts := tstruc{
+ genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
+ x.genTypeName(telem), int(telem.Size() + tkey.Size()),
+ }
+
+ funcs := make(template.FuncMap)
+ funcs["decElemZero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["decElemKindImmutable"] = func() bool {
+ return genIsImmutable(telem)
+ }
+ funcs["decElemKindPtr"] = func() bool {
+ return telem.Kind() == reflect.Ptr
+ }
+ funcs["decElemKindIntf"] = func() bool {
+ return telem.Kind() == reflect.Interface
+ }
+ funcs["decLineVarK"] = func(varname string) string {
+ x.decVar(varname, tkey, false)
+ return ""
+ }
+ funcs["decLineVar"] = func(varname string) string {
+ x.decVar(varname, telem, false)
+ return ""
+ }
+ funcs["decLineK"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false)
+ return ""
+ }
+ funcs["decLine"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+
+ tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ x.line("switch (" + kName + ") {")
+ for _, si := range tisfi {
+ x.line("case \"" + si.encName + "\":")
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ }
+ }
+ x.decVar(varname+"."+t2.Name, t2.Type, false)
+ }
+ x.line("default:")
+ // pass the slice here, so that the string will not escape, and maybe save allocation
+ x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
+ x.line("} // end switch " + kName)
+}
+
+func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
+ tpfx := genTempVarPfx
+ i := x.varsfx()
+ kName := tpfx + "s" + i
+
+	// We thought to use ReadStringAsBytes, as the Go compiler might optimize the copy away.
+	// However, using that was more expensive, as it seems that the switch expression
+	// is evaluated each time.
+	//
+	// We could depend on decodeString using a temporary/shared buffer internally.
+	// However, this model of creating a byte array, and using it explicitly, is faster,
+	// and allows optional use of unsafe []byte->string conversion without alloc.
+
+ // Also, ensure that the slice array doesn't escape.
+ // That will help escape analysis prevent allocation when it gets better.
+
+ // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into")
+ // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into")
+	// use the scratch buffer to avoid allocation (most field names are < 32 bytes).
+
+ x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into")
+
+ x.line("_ = " + kName + "Slc")
+ switch style {
+ case genStructMapStyleLenPrefix:
+ x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
+ case genStructMapStyleCheckBreak:
+ x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
+ default: // 0, otherwise.
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
+ x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
+ x.line("} else { if r.CheckBreak() { break }; }")
+ }
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)")
+ // let string be scoped to this loop alone, so it doesn't escape.
+ if x.unsafe {
+ x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" +
+ kName + "Slc[0])), len(" + kName + "Slc)}")
+ x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))")
+ } else {
+ x.line(kName + " := string(" + kName + "Slc)")
+ }
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ x.decStructMapSwitch(kName, varname, rtid, t)
+
+ x.line("} // end for " + tpfx + "j" + i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
+
+func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
+ tpfx := genTempVarPfx
+ i := x.varsfx()
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ x.linef("var %sj%s int", tpfx, i)
+ x.linef("var %sb%s bool", tpfx, i) // break
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ for _, si := range tisfi {
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ }
+ }
+
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }",
+ tpfx, i, x.xs, breakString)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.decVar(varname+"."+t2.Name, t2.Type, true)
+ }
+ // read remaining values and throw away.
+ x.line("for {")
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { break }", tpfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
+ x.line("}")
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+}
+
+func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
+ // if container is map
+ i := x.varsfx()
+ x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
+ x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+ if genUseOneFunctionForDecStructMap {
+ x.line("} else { ")
+ x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i)
+ } else {
+ x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
+ x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
+ x.line("} else {")
+ x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
+ }
+ x.line("}")
+
+ // else if container is array
+ x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+ x.line("} else { ")
+ x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i)
+ x.line("}")
+ // else panic
+ x.line("} else { ")
+ x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")")
+ x.line("} ")
+}
+
+// --------
+
+type genV struct {
+ // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
+ MapKey string
+ Elem string
+ Primitive string
+ Size int
+}
+
+func (x *genRunner) newGenV(t reflect.Type) (v genV) {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array:
+ te := t.Elem()
+ v.Elem = x.genTypeName(te)
+ v.Size = int(te.Size())
+ case reflect.Map:
+ te, tk := t.Elem(), t.Key()
+ v.Elem = x.genTypeName(te)
+ v.MapKey = x.genTypeName(tk)
+ v.Size = int(te.Size() + tk.Size())
+ default:
+ panic("unexpected type for newGenV. Requires map or slice type")
+ }
+ return
+}
+
+func (x *genV) MethodNamePfx(prefix string, prim bool) string {
+ var name []byte
+ if prefix != "" {
+ name = append(name, prefix...)
+ }
+ if prim {
+ name = append(name, genTitleCaseName(x.Primitive)...)
+ } else {
+ if x.MapKey == "" {
+ name = append(name, "Slice"...)
+ } else {
+ name = append(name, "Map"...)
+ name = append(name, genTitleCaseName(x.MapKey)...)
+ }
+ name = append(name, genTitleCaseName(x.Elem)...)
+ }
+ return string(name)
+
+}
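+
+// For instance: (&genV{MapKey: "string", Elem: "int"}).MethodNamePfx("Enc", false)
+// returns "EncMapStringInt", and (&genV{Elem: "uint64"}).MethodNamePfx("Dec", false)
+// returns "DecSliceUint64", the naming used for the fastpath helpers invoked
+// through z.F in the generated code.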
+
+// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+ s = t.PkgPath()
+ if genCheckVendor {
+ // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+ // if s contains /vendor/ OR startsWith vendor/, then return everything after it.
+ const vendorStart = "vendor/"
+ const vendorInline = "/vendor/"
+ if i := strings.LastIndex(s, vendorInline); i >= 0 {
+ s = s[i+len(vendorInline):]
+ } else if strings.HasPrefix(s, vendorStart) {
+ s = s[len(vendorStart):]
+ }
+ }
+ return
+}
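+
+// For instance, with genCheckVendor set, a hypothetical PkgPath of
+// "example.com/app/vendor/github.com/ugorji/go/codec" is reduced to
+// "github.com/ugorji/go/codec" (everything up to and including "/vendor/" is
+// dropped).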
+
+// A Go identifier is (letter|_)(letter|digit|_)*.
+func genGoIdentifier(s string, checkFirstChar bool) string {
+ b := make([]byte, 0, len(s))
+ t := make([]byte, 4)
+ var n int
+ for i, r := range s {
+ if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+ b = append(b, '_')
+ }
+ // r must be unicode_letter, unicode_digit or _
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ n = utf8.EncodeRune(t, r)
+ b = append(b, t[:n]...)
+ } else {
+ b = append(b, '_')
+ }
+ }
+ return string(b)
+}
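+
+// For example: genGoIdentifier("go-codec", false) returns "go_codec", and
+// genGoIdentifier("9lives", true) returns "_9lives" (a leading non-letter
+// gains an underscore prefix when checkFirstChar is true).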
+
+func genNonPtr(t reflect.Type) reflect.Type {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+func genTitleCaseName(s string) string {
+ switch s {
+ case "interface{}", "interface {}":
+ return "Intf"
+ default:
+ return strings.ToUpper(s[0:1]) + s[1:]
+ }
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "Ptrto"
+ t = t.Elem()
+ }
+ tstr := t.String()
+ if tn := t.Name(); tn != "" {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ return ptrPfx + tn
+ } else {
+ if genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Slice:
+ return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+ case reflect.Array:
+ return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Chan:
+ var cx string
+ switch t.ChanDir() {
+ case reflect.SendDir:
+ cx = "ChanSend"
+ case reflect.RecvDir:
+ cx = "ChanRecv"
+ default:
+ cx = "Chan"
+ }
+ return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+ default:
+ if t == intfTyp {
+ return ptrPfx + "Interface"
+ } else {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ if t.Name() != "" {
+ return ptrPfx + t.Name()
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ } else {
+ // best way to get the package name inclusive
+ // return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+ if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ }
+}
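+
+// To illustrate: genMethodNameT produces identifier-safe suffixes such as
+// "Mapstringstring" for map[string]string, "Slicestring" for []string, and
+// "SlicePtrtoMyType" for a hypothetical []*MyType (when MyType is in the same
+// package as tRef); these suffixes form the enc.../dec... helper method names.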
+
+// genCustomTypeName base64-encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+ len2 := genBase64enc.EncodedLen(len(tstr))
+ bufx := make([]byte, len2)
+ genBase64enc.Encode(bufx, []byte(tstr))
+ for i := len2 - 1; i >= 0; i-- {
+ if bufx[i] == '=' {
+ len2--
+ } else {
+ break
+ }
+ }
+ return string(bufx[:len2])
+}
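+
+// For example, genCustomTypeName("map[string]int") returns the base64
+// encoding of that string with trailing '=' padding stripped, a token safe to
+// embed in a generated method name (assuming genBase64enc, defined elsewhere
+// in this package, uses an identifier-safe alphabet).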
+
+func genIsImmutable(t reflect.Type) (v bool) {
+ return isImmutableKind(t.Kind())
+}
+
+type genInternal struct {
+ Values []genV
+ Unsafe bool
+}
+
+func (x genInternal) FastpathLen() (l int) {
+ for _, v := range x.Values {
+ if v.Primitive == "" {
+ l++
+ }
+ }
+ return
+}
+
+func genInternalZeroValue(s string) string {
+ switch s {
+ case "interface{}", "interface {}":
+ return "nil"
+ case "bool":
+ return "false"
+ case "string":
+ return `""`
+ default:
+ return "0"
+ }
+}
+
+func genInternalEncCommandAsString(s string, vname string) string {
+ switch s {
+ case "uint", "uint8", "uint16", "uint32", "uint64":
+ return "ee.EncodeUint(uint64(" + vname + "))"
+ case "int", "int8", "int16", "int32", "int64":
+ return "ee.EncodeInt(int64(" + vname + "))"
+ case "string":
+ return "ee.EncodeString(c_UTF8, " + vname + ")"
+ case "float32":
+ return "ee.EncodeFloat32(" + vname + ")"
+ case "float64":
+ return "ee.EncodeFloat64(" + vname + ")"
+ case "bool":
+ return "ee.EncodeBool(" + vname + ")"
+ case "symbol":
+ return "ee.EncodeSymbol(" + vname + ")"
+ default:
+ return "e.encode(" + vname + ")"
+ }
+}
+
+func genInternalDecCommandAsString(s string) string {
+ switch s {
+ case "uint":
+ return "uint(dd.DecodeUint(uintBitsize))"
+ case "uint8":
+ return "uint8(dd.DecodeUint(8))"
+ case "uint16":
+ return "uint16(dd.DecodeUint(16))"
+ case "uint32":
+ return "uint32(dd.DecodeUint(32))"
+ case "uint64":
+ return "dd.DecodeUint(64)"
+ case "uintptr":
+ return "uintptr(dd.DecodeUint(uintBitsize))"
+ case "int":
+ return "int(dd.DecodeInt(intBitsize))"
+ case "int8":
+ return "int8(dd.DecodeInt(8))"
+ case "int16":
+ return "int16(dd.DecodeInt(16))"
+ case "int32":
+ return "int32(dd.DecodeInt(32))"
+ case "int64":
+ return "dd.DecodeInt(64)"
+
+ case "string":
+ return "dd.DecodeString()"
+ case "float32":
+ return "float32(dd.DecodeFloat(true))"
+ case "float64":
+ return "dd.DecodeFloat(false)"
+ case "bool":
+ return "dd.DecodeBool()"
+ default:
+ panic(errors.New("gen internal: unknown type for decode: " + s))
+ }
+}
+
+func genInternalSortType(s string, elem bool) string {
+ for _, v := range [...]string{"int", "uint", "float", "bool", "string"} {
+ if strings.HasPrefix(s, v) {
+ if elem {
+ if v == "int" || v == "uint" || v == "float" {
+ return v + "64"
+ } else {
+ return v
+ }
+ }
+ return v + "Slice"
+ }
+ }
+ panic("sorttype: unexpected type: " + s)
+}
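+
+// For example: genInternalSortType("uint8", false) returns "uintSlice",
+// genInternalSortType("uint8", true) returns "uint64", and
+// genInternalSortType("string", true) returns "string".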
+
+// var genInternalMu sync.Mutex
+var genInternalV genInternal
+var genInternalTmplFuncs template.FuncMap
+var genInternalOnce sync.Once
+
+func genInternalInit() {
+ types := [...]string{
+ "interface{}",
+ "string",
+ "float32",
+ "float64",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "bool",
+ }
+ // keep as slice, so it is in specific iteration order.
+ // Initial order was uint64, string, interface{}, int, int64
+ mapvaltypes := [...]string{
+ "interface{}",
+ "string",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "float32",
+ "float64",
+ "bool",
+ }
+ wordSizeBytes := int(intBitsize) / 8
+
+ mapvaltypes2 := map[string]int{
+ "interface{}": 2 * wordSizeBytes,
+ "string": 2 * wordSizeBytes,
+ "uint": 1 * wordSizeBytes,
+ "uint8": 1,
+ "uint16": 2,
+ "uint32": 4,
+ "uint64": 8,
+ "uintptr": 1 * wordSizeBytes,
+ "int": 1 * wordSizeBytes,
+ "int8": 1,
+ "int16": 2,
+ "int32": 4,
+ "int64": 8,
+ "float32": 4,
+ "float64": 8,
+ "bool": 1,
+ }
+ var gt genInternal
+
+ // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
+ for _, s := range types {
+ gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
+ if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
+ gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+ }
+ if _, ok := mapvaltypes2[s]; !ok {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]})
+ }
+ for _, ms := range mapvaltypes {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]})
+ }
+ }
+
+ funcs := make(template.FuncMap)
+ // funcs["haspfx"] = strings.HasPrefix
+ funcs["encmd"] = genInternalEncCommandAsString
+ funcs["decmd"] = genInternalDecCommandAsString
+ funcs["zerocmd"] = genInternalZeroValue
+ funcs["hasprefix"] = strings.HasPrefix
+ funcs["sorttype"] = genInternalSortType
+
+ genInternalV = gt
+ genInternalTmplFuncs = funcs
+}
+
+// genInternalGoFile is used to generate source files from templates.
+// It is run by the program author alone, via a command-line tool.
+// *** DO NOT USE ***
+func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) {
+ genInternalOnce.Do(genInternalInit)
+
+ gt := genInternalV
+ gt.Unsafe = !safe
+
+ t := template.New("").Funcs(genInternalTmplFuncs)
+
+ tmplstr, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+
+ if t, err = t.Parse(string(tmplstr)); err != nil {
+ return
+ }
+
+ var out bytes.Buffer
+ err = t.Execute(&out, gt)
+ if err != nil {
+ return
+ }
+
+ bout, err := format.Source(out.Bytes())
+ if err != nil {
+		w.Write(out.Bytes()) // on error, write the unformatted output so it can still be inspected.
+ // w.Write(bout) // write out if error, as much as possible, so we can still see.
+ return
+ }
+ w.Write(bout)
+ return
+}
diff --git a/vendor/github.com/ugorji/go/codec/gen_15.go b/vendor/github.com/ugorji/go/codec/gen_15.go
new file mode 100644
index 000000000..ab76c3102
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen_15.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.5,!go1.6
+
+package codec
+
+import "os"
+
+func init() {
+ genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
+}
diff --git a/vendor/github.com/ugorji/go/codec/gen_16.go b/vendor/github.com/ugorji/go/codec/gen_16.go
new file mode 100644
index 000000000..87c04e2e1
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen_16.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.6
+
+package codec
+
+import "os"
+
+func init() {
+ genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
+}
diff --git a/vendor/github.com/ugorji/go/codec/gen_17.go b/vendor/github.com/ugorji/go/codec/gen_17.go
new file mode 100644
index 000000000..3881a43ce
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen_17.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.7
+
+package codec
+
+func init() {
+ genCheckVendor = true
+}
diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 000000000..8b94fc1e4
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,1314 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, giving the best performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// During encode, we use a high-level condition to determine how to iterate through
+// the container. That decision is based on whether the container is text-based (with
+// separators) or binary (without separators). If binary, we do not even call the
+// encoding of separators.
+//
+// During decode, we use a different high-level condition to determine how to iterate
+// through the containers. That decision is based on whether the stream contained
+// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
+// it has to be binary, and we do not even try to read separators.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+// - If struct, update fields from stream into fields of struct.
+// If field in stream not found in struct, handle appropriately (based on option).
+// If a struct field has no corresponding value in the stream, leave it AS IS.
+// If nil in stream, set value to nil/zero value.
+// - If map, update map from stream.
+// If the stream value is NIL, set the map to nil.
+// - If slice, try to update up to the length of the array in the stream.
+// If the container length is less than the stream array length,
+// and the container cannot be expanded, it is handled (based on option).
+// This means you can decode a 4-element stream array into a 1-element array.
+//
+// ------------------------------------
+// On encode, the user can specify omitEmpty. This means that the value will be omitted
+// if it is the zero value. The problem may occur during decode, where omitted values do not affect
+// the value being decoded into. This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
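+// For example: encoding struct{ A int `codec:",omitempty"` }{} writes an empty
+// map, and decoding that stream into a struct whose A is currently 5 leaves A == 5.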
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+// map
+// - first collect all keys (e.g. in k1)
+// - for each key in stream, mark k1 that the key should not be removed
+// - after updating map, do second pass and call delete for all keys in k1 which are not marked
+// struct:
+// - for each field, track the *typeInfo s1
+// - iterate through all s1, and for each one not marked, set value to zero
+// - this involves checking the possible anonymous fields which are nil ptrs;
+// too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error in the En|Decoder.
+// - once it has its err field set, it cannot be used again.
+// - panicking will be optional, controlled by const flag.
+// - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ scratchByteArrayLen = 32
+ initCollectionCap = 32 // 32 is defensive. 16 is preferred.
+
+ // Support encoding.(Binary|Text)(Unm|M)arshaler.
+ // This constant flag will enable or disable it.
+ supportMarshalInterfaces = true
+
+ // Each Encoder or Decoder uses a cache of functions based on conditionals,
+ // so that the conditionals are not run every time.
+ //
+ // Either a map or a slice is used to keep track of the functions.
+ // The map is more natural, but has a higher cost than a slice/array.
+ // This flag (useMapForCodecCache) controls which is used.
+ //
+ // From benchmarks, slices with linear search perform better with < 32 entries.
+ // We have typically seen a high threshold of about 24 entries.
+ useMapForCodecCache = false
+
+ // for debugging, set this to false, to catch panic traces.
+ // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+ recoverPanicToErr = true
+
+ // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
+ // Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
+ // The chances of this are slim, so leave this "optimization".
+ // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value?
+ resetSliceElemToZeroValue bool = false
+)
+
+var (
+ oneByteArr = [1]byte{0}
+ zeroByteSlice = oneByteArr[:0:0]
+)
+
+type charEncoding uint8
+
+const (
+ c_RAW charEncoding = iota
+ c_UTF8
+ c_UTF16LE
+ c_UTF16BE
+ c_UTF32LE
+ c_UTF32BE
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+ valueTypeUnset valueType = iota
+ valueTypeNil
+ valueTypeInt
+ valueTypeUint
+ valueTypeFloat
+ valueTypeBool
+ valueTypeString
+ valueTypeSymbol
+ valueTypeBytes
+ valueTypeMap
+ valueTypeArray
+ valueTypeTimestamp
+ valueTypeExt
+
+ // valueTypeInvalid = 0xff
+)
+
+type seqType uint8
+
+const (
+ _ seqType = iota
+ seqTypeArray
+ seqTypeSlice
+ seqTypeChan
+)
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart already do these.
+type containerState uint8
+
+const (
+ _ containerState = iota
+
+ containerMapStart // slot left open, since Driver method already covers it
+ containerMapKey
+ containerMapValue
+ containerMapEnd
+ containerArrayStart // slot left open, since Driver methods already cover it
+ containerArrayElem
+ containerArrayEnd
+)
+
+// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
+type sfiIdx struct {
+ name string
+ index int
+}
+
+// do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// Anecdotally, we believe most types have <= 12 fields.
+// Java's PMD rules set TooManyFields threshold to 15.
+const rgetPoolTArrayLen = 12
+
+type rgetT struct {
+ fNames []string
+ encNames []string
+ etypes []uintptr
+ sfis []*structFieldInfo
+}
+
+type rgetPoolT struct {
+ fNames [rgetPoolTArrayLen]string
+ encNames [rgetPoolTArrayLen]string
+ etypes [rgetPoolTArrayLen]uintptr
+ sfis [rgetPoolTArrayLen]*structFieldInfo
+ sfiidx [rgetPoolTArrayLen]sfiIdx
+}
+
+var rgetPool = sync.Pool{
+ New: func() interface{} { return new(rgetPoolT) },
+}
+
+type containerStateRecv interface {
+ sendContainerState(containerState)
+}
+
+// mirror json.Marshaler and json.Unmarshaler here,
+// so we don't import the encoding/json package
+type jsonMarshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+type jsonUnmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+var (
+ bigen = binary.BigEndian
+ structInfoFieldName = "_struct"
+
+ mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+ mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+ intfSliceTyp = reflect.TypeOf([]interface{}(nil))
+ intfTyp = intfSliceTyp.Elem()
+
+ stringTyp = reflect.TypeOf("")
+ timeTyp = reflect.TypeOf(time.Time{})
+ rawExtTyp = reflect.TypeOf(RawExt{})
+ rawTyp = reflect.TypeOf(Raw{})
+ uint8SliceTyp = reflect.TypeOf([]uint8(nil))
+
+ mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+
+ binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+
+ textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+ jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+ jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+
+ selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
+
+ uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
+ rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
+ rawTypId = reflect.ValueOf(rawTyp).Pointer()
+ intfTypId = reflect.ValueOf(intfTyp).Pointer()
+ timeTypId = reflect.ValueOf(timeTyp).Pointer()
+ stringTypId = reflect.ValueOf(stringTyp).Pointer()
+
+ mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
+ mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
+ intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
+ // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
+
+ intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
+ uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
+
+ bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ chkOvf checkOverflow
+
+ noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo")
+)
+
+var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+type Selfer interface {
+ CodecEncodeSelf(*Encoder)
+ CodecDecodeSelf(*Decoder)
+}
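+
+// As an illustrative sketch (the type below is hypothetical, not part of
+// this package), a type can implement Selfer by delegating back to the
+// Encoder/Decoder it is handed:
+//
+//   type wrapped struct{ V int }
+//
+//   func (w *wrapped) CodecEncodeSelf(e *Encoder) { e.MustEncode(w.V) }
+//   func (w *wrapped) CodecDecodeSelf(d *Decoder) { d.MustDecode(&w.V) }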
+
+// MapBySlice represents a slice which should be encoded as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// The support of MapBySlice affords the following:
+// - A slice type which implements MapBySlice will be encoded as a map
+// - A slice can be decoded from a map in the stream
+type MapBySlice interface {
+ MapBySlice()
+}
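+
+// For example (an illustrative sketch; pairs is a hypothetical type), a slice
+// of alternating key-value entries can declare itself map-encoded:
+//
+//   type pairs []interface{}
+//
+//   func (pairs) MapBySlice() {}
+//
+// pairs{"a", 1, "b", 2} then encodes as the map {"a": 1, "b": 2}.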
+
+// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+//
+// BasicHandle encapsulates the common options and extension functions.
+type BasicHandle struct {
+ // TypeInfos is used to get the type info for any type.
+ //
+ // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+ TypeInfos *TypeInfos
+
+ extHandle
+ EncodeOptions
+ DecodeOptions
+}
+
+func (x *BasicHandle) getBasicHandle() *BasicHandle {
+ return x
+}
+
+func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ if x.TypeInfos != nil {
+ return x.TypeInfos.get(rtid, rt)
+ }
+ return defTypeInfos.get(rtid, rt)
+}
+
+// Handle is the interface for a specific encoding format.
+//
+// Typically, a Handle is pre-configured before first time use,
+// and not modified while in use. Such a pre-configured Handle
+// is safe for concurrent access.
+type Handle interface {
+ getBasicHandle() *BasicHandle
+ newEncDriver(w *Encoder) encDriver
+ newDecDriver(r *Decoder) decDriver
+ isBinary() bool
+}
+
+// Raw represents raw formatted bytes.
+// We "blindly" store it during encode and store the raw bytes during decode.
+// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set.
+type Raw []byte
+
+// RawExt represents raw unprocessed extension data.
+// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
+//
+// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value.
+type RawExt struct {
+ Tag uint64
+ // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value.
+ // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
+ Data []byte
+ // Value represents the extension, if Data is nil.
+ // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
+ Value interface{}
+}
+
+// BytesExt handles custom (de)serialization of types to/from []byte.
+// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
+type BytesExt interface {
+ // WriteExt converts a value to a []byte.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
+ WriteExt(v interface{}) []byte
+
+ // ReadExt updates a value from a []byte.
+ ReadExt(dst interface{}, src []byte)
+}
+
+// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
+// The Encoder or Decoder will then handle the further (de)serialization of that known type.
+//
+// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
+type InterfaceExt interface {
+ // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
+ ConvertExt(v interface{}) interface{}
+
+ // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
+ UpdateExt(dst interface{}, src interface{})
+}
+
+// Ext handles custom (de)serialization of custom types / extensions.
+type Ext interface {
+ BytesExt
+ InterfaceExt
+}
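+
+// A minimal InterfaceExt sketch (illustrative only: unixTimeExt is a
+// hypothetical name, and it assumes v arrives as *time.Time and that the
+// stream yields an int64):
+//
+//   type unixTimeExt struct{}
+//
+//   func (unixTimeExt) ConvertExt(v interface{}) interface{} {
+//       return v.(*time.Time).Unix()
+//   }
+//
+//   func (unixTimeExt) UpdateExt(dst, src interface{}) {
+//       *dst.(*time.Time) = time.Unix(src.(int64), 0)
+//   }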
+
+// addExtWrapper is a wrapper implementation to support former AddExt exported method.
+type addExtWrapper struct {
+ encFn func(reflect.Value) ([]byte, error)
+ decFn func(reflect.Value, []byte) error
+}
+
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+ bs, err := x.encFn(reflect.ValueOf(v))
+ if err != nil {
+ panic(err)
+ }
+ return bs
+}
+
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+ if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
+ panic(err)
+ }
+}
+
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+ x.ReadExt(dest, v.([]byte))
+}
+
+type setExtWrapper struct {
+ b BytesExt
+ i InterfaceExt
+}
+
+func (x *setExtWrapper) WriteExt(v interface{}) []byte {
+ if x.b == nil {
+ panic("BytesExt.WriteExt is not supported")
+ }
+ return x.b.WriteExt(v)
+}
+
+func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) {
+ if x.b == nil {
+ panic("BytesExt.WriteExt is not supported")
+
+ }
+ x.b.ReadExt(v, bs)
+}
+
+func (x *setExtWrapper) ConvertExt(v interface{}) interface{} {
+ if x.i == nil {
+ panic("InterfaceExt.ConvertExt is not supported")
+
+ }
+ return x.i.ConvertExt(v)
+}
+
+func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+ if x.i == nil {
+ panic("InterfaceExxt.UpdateExt is not supported")
+
+ }
+ x.i.UpdateExt(dest, v)
+}
+
+// type errorString string
+// func (x errorString) Error() string { return string(x) }
+
+type binaryEncodingType struct{}
+
+func (_ binaryEncodingType) isBinary() bool { return true }
+
+type textEncodingType struct{}
+
+func (_ textEncodingType) isBinary() bool { return false }
+
+// noBuiltInTypes is embedded into many types which do not support builtins
+// e.g. msgpack, simple, cbor.
+type noBuiltInTypes struct{}
+
+func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false }
+func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
+
+type noStreamingCodec struct{}
+
+func (_ noStreamingCodec) CheckBreak() bool { return false }
+
+// bigenHelper writes big-endian encoded numbers.
+// Users must already slice x to the exact length needed, because we will not reslice.
+type bigenHelper struct {
+ x []byte // must be correctly sliced to appropriate len. slicing is a cost.
+ w encWriter
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+ bigen.PutUint16(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+ bigen.PutUint32(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+ bigen.PutUint64(z.x, v)
+ z.w.writeb(z.x)
+}
+
+type extTypeTagFn struct {
+ rtid uintptr
+ rt reflect.Type
+ tag uint64
+ ext Ext
+}
+
+type extHandle []extTypeTagFn
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// AddExt registers an encode and decode function for a reflect.Type.
+// AddExt internally calls SetExt.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+func (o *extHandle) AddExt(
+ rt reflect.Type, tag byte,
+ encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error,
+) (err error) {
+ if encfn == nil || decfn == nil {
+ return o.SetExt(rt, uint64(tag), nil)
+ }
+ return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// Note that the type must be a named type, and specifically not
+// a pointer or Interface. An error is returned if that is not honored.
+//
+// To deregister an Ext, call SetExt with a nil Ext.
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+ // o is a pointer, because we may need to initialize it
+ if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
+ err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T",
+ reflect.Zero(rt).Interface())
+ return
+ }
+
+ rtid := reflect.ValueOf(rt).Pointer()
+ for _, v := range *o {
+ if v.rtid == rtid {
+ v.tag, v.ext = tag, ext
+ return
+ }
+ }
+
+ if *o == nil {
+ *o = make([]extTypeTagFn, 0, 4)
+ }
+ *o = append(*o, extTypeTagFn{rtid, rt, tag, ext})
+ return
+}
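+
+// Usage sketch (hedged: assumes a JsonHandle exposing the SetInterfaceExt
+// method that the deprecation notes above refer to, and the hypothetical
+// unixTimeExt from the Ext docs):
+//
+//   var jh JsonHandle
+//   if err := jh.SetInterfaceExt(reflect.TypeOf(time.Time{}), 1, unixTimeExt{}); err != nil {
+//       panic(err)
+//   }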
+
+func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
+ if v.rtid == rtid {
+ return v
+ }
+ }
+ return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
+ if v.tag == tag {
+ return v
+ }
+ }
+ return nil
+}
+
+type structFieldInfo struct {
+ encName string // encode name
+ fieldName string // field name
+
+ // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
+
+ is []int // (recursive/embedded) field index in struct
+ i int16 // field index in struct
+ omitEmpty bool
+ toArray bool // if field is _struct, is the toArray set?
+}
+
+// func (si *structFieldInfo) isZero() bool {
+// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray
+// }
+
+// field returns the field of the struct v.
+// If the field is reached through a nil embedded pointer and update is false,
+// it returns an invalid (zero) reflect.Value.
+func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) {
+ if si.i != -1 {
+ v = v.Field(int(si.i))
+ return v
+ }
+ // replicate FieldByIndex
+ for _, x := range si.is {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ if !update {
+ return
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
+ if si.i != -1 {
+ v = v.Field(int(si.i))
+ v.Set(reflect.Zero(v.Type()))
+ // v.Set(reflect.New(v.Type()).Elem())
+ // v.Set(reflect.New(v.Type()))
+ } else {
+ // replicate FieldByIndex
+ for _, x := range si.is {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ }
+ v = v.Field(x)
+ }
+ v.Set(reflect.Zero(v.Type()))
+ }
+}
+
+func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
+ // if fname == "" {
+ // panic(noFieldNameToStructFieldInfoErr)
+ // }
+ si := structFieldInfo{
+ encName: fname,
+ }
+
+ if stag != "" {
+ for i, s := range strings.Split(stag, ",") {
+ if i == 0 {
+ if s != "" {
+ si.encName = s
+ }
+ } else {
+ if s == "omitempty" {
+ si.omitEmpty = true
+ } else if s == "toarray" {
+ si.toArray = true
+ }
+ }
+ }
+ }
+ // si.encNameBs = []byte(si.encName)
+ return &si
+}
+
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int {
+ return len(p)
+}
+
+func (p sfiSortedByEncName) Less(i, j int) bool {
+ return p[i].encName < p[j].encName
+}
+
+func (p sfiSortedByEncName) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+// typeInfo keeps information about each type referenced in the encode/decode sequence.
+//
+// During an encode/decode sequence, we work as below:
+// - If base is a built in type, en/decode base value
+// - If base is registered as an extension, en/decode base value
+// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
+// - Else decode appropriately based on the reflect.Kind
+type typeInfo struct {
+ sfi []*structFieldInfo // sorted. Used when enc/dec struct to map.
+ sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.
+
+ rt reflect.Type
+ rtid uintptr
+
+ numMeth uint16 // number of methods
+
+ // baseId gives pointer to the base reflect.Type, after dereferencing
+ // the pointers. E.g. base type of ***time.Time is time.Time.
+ base reflect.Type
+ baseId uintptr
+ baseIndir int8 // number of indirections to get to base
+
+ mbs bool // base type (T or *T) is a MapBySlice
+
+ bm bool // base type (T or *T) is a binaryMarshaler
+ bunm bool // base type (T or *T) is a binaryUnmarshaler
+ bmIndir int8 // number of indirections to get to binaryMarshaler type
+ bunmIndir int8 // number of indirections to get to binaryUnmarshaler type
+
+ tm bool // base type (T or *T) is a textMarshaler
+ tunm bool // base type (T or *T) is a textUnmarshaler
+ tmIndir int8 // number of indirections to get to textMarshaler type
+ tunmIndir int8 // number of indirections to get to textUnmarshaler type
+
+ jm bool // base type (T or *T) is a jsonMarshaler
+ junm bool // base type (T or *T) is a jsonUnmarshaler
+ jmIndir int8 // number of indirections to get to jsonMarshaler type
+ junmIndir int8 // number of indirections to get to jsonUnmarshaler type
+
+ cs bool // base type (T or *T) is a Selfer
+ csIndir int8 // number of indirections to get to Selfer type
+
+ toArray bool // whether this (struct) type should be encoded as an array
+}
+
+func (ti *typeInfo) indexForEncName(name string) int {
+ // NOTE: name may be a stringView, so don't pass it to another function.
+ //tisfi := ti.sfi
+ const binarySearchThreshold = 16
+ if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
+ // linear search. faster than binary search in my testing up to 16-field structs.
+ for i, si := range ti.sfi {
+ if si.encName == name {
+ return i
+ }
+ }
+ } else {
+ // binary search. adapted from sort/search.go.
+ h, i, j := 0, 0, sfilen
+ for i < j {
+ h = i + (j-i)/2
+ if ti.sfi[h].encName < name {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < sfilen && ti.sfi[i].encName == name {
+ return i
+ }
+ }
+ return -1
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+ infos map[uintptr]*typeInfo
+ mu sync.RWMutex
+ tags []string
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
+//
+// This allows users to customize the struct tag keys which contain configuration
+// of their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+ return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)}
+}
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+ // check for tags: codec, json, in that order.
+ // this allows seamless support for many configured structs.
+ for _, x := range x.tags {
+ s = t.Get(x)
+ if s != "" {
+ return s
+ }
+ }
+ return
+}
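+
+// For instance, with the default tag keys ("codec", then "json"), a
+// hypothetical field declared as
+//
+//   Name string `codec:"n" json:"name"`
+//
+// resolves to "n": the first configured tag key that is present wins.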
+
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ var ok bool
+ x.mu.RLock()
+ pti, ok = x.infos[rtid]
+ x.mu.RUnlock()
+ if ok {
+ return
+ }
+
+ // do not hold lock while computing this.
+ // it may lead to duplication, but that's ok.
+ ti := typeInfo{rt: rt, rtid: rtid}
+ ti.numMeth = uint16(rt.NumMethod())
+
+ var indir int8
+ if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
+ ti.bm, ti.bmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
+ ti.bunm, ti.bunmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, textMarshalerTyp); ok {
+ ti.tm, ti.tmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok {
+ ti.tunm, ti.tunmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok {
+ ti.jm, ti.jmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok {
+ ti.junm, ti.junmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, selferTyp); ok {
+ ti.cs, ti.csIndir = true, indir
+ }
+ if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
+ ti.mbs = true
+ }
+
+ pt := rt
+ var ptIndir int8
+ // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { }
+ for pt.Kind() == reflect.Ptr {
+ pt = pt.Elem()
+ ptIndir++
+ }
+ if ptIndir == 0 {
+ ti.base = rt
+ ti.baseId = rtid
+ } else {
+ ti.base = pt
+ ti.baseId = reflect.ValueOf(pt).Pointer()
+ ti.baseIndir = ptIndir
+ }
+
+ if rt.Kind() == reflect.Struct {
+ var omitEmpty bool
+ if f, ok := rt.FieldByName(structInfoFieldName); ok {
+ siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
+ ti.toArray = siInfo.toArray
+ omitEmpty = siInfo.omitEmpty
+ }
+ pi := rgetPool.Get()
+ pv := pi.(*rgetPoolT)
+ pv.etypes[0] = ti.baseId
+ vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
+ x.rget(rt, rtid, omitEmpty, nil, &vv)
+ ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0])
+ rgetPool.Put(pi)
+ }
+ // sfi = sfip
+
+ x.mu.Lock()
+ if pti, ok = x.infos[rtid]; !ok {
+ pti = &ti
+ x.infos[rtid] = pti
+ }
+ x.mu.Unlock()
+ return
+}
+
+func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
+ indexstack []int, pv *rgetT,
+) {
+ // Read the fields and store how to access each value.
+ //
+ // It uses Go's rules for field selectors,
+ // which say that the field with the shallowest depth is selected.
+ //
+ // Note: we consciously use slices, not a map, to simulate a set.
+ // Typically, types have < 16 fields,
+ // and linear iteration with equality checks is faster than a map at that size.
+
+LOOP:
+ for j, jlen := 0, rt.NumField(); j < jlen; j++ {
+ f := rt.Field(j)
+ fkind := f.Type.Kind()
+ // skip if a func type, or is unexported, or structTag value == "-"
+ switch fkind {
+ case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
+ continue LOOP
+ }
+
+ // if r1, _ := utf8.DecodeRuneInString(f.Name);
+ // r1 == utf8.RuneError || !unicode.IsUpper(r1) {
+ if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
+ continue
+ }
+ stag := x.structTag(f.Tag)
+ if stag == "-" {
+ continue
+ }
+ var si *structFieldInfo
+ // if anonymous and no struct tag (or it's blank),
+ // and a struct (or pointer to struct), inline it.
+ if f.Anonymous && fkind != reflect.Interface {
+ doInline := stag == ""
+ if !doInline {
+ si = parseStructFieldInfo("", stag)
+ doInline = si.encName == ""
+ // doInline = si.isZero()
+ }
+ if doInline {
+ ft := f.Type
+ for ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if ft.Kind() == reflect.Struct {
+ // if etypes contains this, don't call rget again (as fields are already seen here)
+ ftid := reflect.ValueOf(ft).Pointer()
+ // We cannot recurse forever, but we need to track other field depths.
+ // So - we break if we see a type twice (not the first time).
+ // This should be sufficient to handle an embedded type that refers to its
+ // owning type, which then refers to its embedded type.
+ processIt := true
+ numk := 0
+ for _, k := range pv.etypes {
+ if k == ftid {
+ numk++
+ if numk == rgetMaxRecursion {
+ processIt = false
+ break
+ }
+ }
+ }
+ if processIt {
+ pv.etypes = append(pv.etypes, ftid)
+ indexstack2 := make([]int, len(indexstack)+1)
+ copy(indexstack2, indexstack)
+ indexstack2[len(indexstack)] = j
+ // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ x.rget(ft, ftid, omitEmpty, indexstack2, pv)
+ }
+ continue
+ }
+ }
+ }
+
+ // after the anonymous dance: if an unexported field, skip
+ if f.PkgPath != "" { // unexported
+ continue
+ }
+
+ if f.Name == "" {
+ panic(noFieldNameToStructFieldInfoErr)
+ }
+
+ pv.fNames = append(pv.fNames, f.Name)
+
+ if si == nil {
+ si = parseStructFieldInfo(f.Name, stag)
+ } else if si.encName == "" {
+ si.encName = f.Name
+ }
+ si.fieldName = f.Name
+
+ pv.encNames = append(pv.encNames, si.encName)
+
+ // si.ikind = int(f.Type.Kind())
+ if len(indexstack) == 0 {
+ si.i = int16(j)
+ } else {
+ si.i = -1
+ si.is = make([]int, len(indexstack)+1)
+ copy(si.is, indexstack)
+ si.is[len(indexstack)] = j
+ // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ }
+
+ if omitEmpty {
+ si.omitEmpty = true
+ }
+ pv.sfis = append(pv.sfis, si)
+ }
+}
+
+// rgetResolveSFI resolves the struct field info obtained from a call to rget.
+// Returns a trimmed, unsorted and sorted []*structFieldInfo.
+func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) {
+ var n int
+ for i, v := range x {
+ xn := v.encName //TODO: fieldName or encName? use encName for now.
+ var found bool
+ for j, k := range pv {
+ if k.name == xn {
+ // one of them must be reset to nil, and the index updated appropriately to the other one
+ if len(v.is) == len(x[k.index].is) {
+ } else if len(v.is) < len(x[k.index].is) {
+ pv[j].index = i
+ if x[k.index] != nil {
+ x[k.index] = nil
+ n++
+ }
+ } else {
+ if x[i] != nil {
+ x[i] = nil
+ n++
+ }
+ }
+ found = true
+ break
+ }
+ }
+ if !found {
+ pv = append(pv, sfiIdx{xn, i})
+ }
+ }
+
+ // remove all the nils
+ y = make([]*structFieldInfo, len(x)-n)
+ n = 0
+ for _, v := range x {
+ if v == nil {
+ continue
+ }
+ y[n] = v
+ n++
+ }
+
+ z = make([]*structFieldInfo, len(y))
+ copy(z, y)
+ sort.Sort(sfiSortedByEncName(z))
+ return
+}
+
+func panicToErr(err *error) {
+ if recoverPanicToErr {
+ if x := recover(); x != nil {
+ //debug.PrintStack()
+ panicValToErr(x, err)
+ }
+ }
+}
+
+// func doPanic(tag string, format string, params ...interface{}) {
+// params2 := make([]interface{}, len(params)+1)
+// params2[0] = tag
+// copy(params2[1:], params)
+// panic(fmt.Errorf("%s: "+format, params2...))
+// }
+
+func isImmutableKind(k reflect.Kind) (v bool) {
+ return false ||
+ k == reflect.Int ||
+ k == reflect.Int8 ||
+ k == reflect.Int16 ||
+ k == reflect.Int32 ||
+ k == reflect.Int64 ||
+ k == reflect.Uint ||
+ k == reflect.Uint8 ||
+ k == reflect.Uint16 ||
+ k == reflect.Uint32 ||
+ k == reflect.Uint64 ||
+ k == reflect.Uintptr ||
+ k == reflect.Float32 ||
+ k == reflect.Float64 ||
+ k == reflect.Bool ||
+ k == reflect.String
+}
+
+// these functions must be inlinable, and not call anybody
+type checkOverflow struct{}
+
+func (_ checkOverflow) Float32(f float64) (overflow bool) {
+ if f < 0 {
+ f = -f
+ }
+ return math.MaxFloat32 < f && f <= math.MaxFloat64
+}
+
+func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
+
+func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
+
+func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) {
+ //e.g. -127 to 128 for int8
+ pos := (v >> 63) == 0
+ ui2 := v & 0x7fffffffffffffff
+ if pos {
+ if ui2 > math.MaxInt64 {
+ overflow = true
+ return
+ }
+ } else {
+ if ui2 > math.MaxInt64-1 {
+ overflow = true
+ return
+ }
+ }
+ i = int64(v)
+ return
+}
+
+// ------------------ SORT -----------------
+
+func isNaN(f float64) bool { return f != f }
+
+// -----------------------
+
+type intSlice []int64
+type uintSlice []uint64
+type floatSlice []float64
+type boolSlice []bool
+type stringSlice []string
+type bytesSlice [][]byte
+
+func (p intSlice) Len() int { return len(p) }
+func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintSlice) Len() int { return len(p) }
+func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatSlice) Len() int { return len(p) }
+func (p floatSlice) Less(i, j int) bool {
+ return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
+}
+func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringSlice) Len() int { return len(p) }
+func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p bytesSlice) Len() int { return len(p) }
+func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
+func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolSlice) Len() int { return len(p) }
+func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// ---------------------
+
+type intRv struct {
+ v int64
+ r reflect.Value
+}
+type intRvSlice []intRv
+type uintRv struct {
+ v uint64
+ r reflect.Value
+}
+type uintRvSlice []uintRv
+type floatRv struct {
+ v float64
+ r reflect.Value
+}
+type floatRvSlice []floatRv
+type boolRv struct {
+ v bool
+ r reflect.Value
+}
+type boolRvSlice []boolRv
+type stringRv struct {
+ v string
+ r reflect.Value
+}
+type stringRvSlice []stringRv
+type bytesRv struct {
+ v []byte
+ r reflect.Value
+}
+type bytesRvSlice []bytesRv
+
+func (p intRvSlice) Len() int { return len(p) }
+func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintRvSlice) Len() int { return len(p) }
+func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatRvSlice) Len() int { return len(p) }
+func (p floatRvSlice) Less(i, j int) bool {
+ return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
+}
+func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringRvSlice) Len() int { return len(p) }
+func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p bytesRvSlice) Len() int { return len(p) }
+func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolRvSlice) Len() int { return len(p) }
+func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
+func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type bytesI struct {
+ v []byte
+ i interface{}
+}
+
+type bytesISlice []bytesI
+
+func (p bytesISlice) Len() int { return len(p) }
+func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type set []uintptr
+
+func (s *set) add(v uintptr) (exists bool) {
+ // e.ci is always nil, or len >= 1
+ // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }()
+ x := *s
+ if x == nil {
+ x = make([]uintptr, 1, 8)
+ x[0] = v
+ *s = x
+ return
+ }
+ // typically, length will be 1. make this case fast.
+ if len(x) == 1 {
+ if j := x[0]; j == 0 {
+ x[0] = v
+ } else if j == v {
+ exists = true
+ } else {
+ x = append(x, v)
+ *s = x
+ }
+ return
+ }
+ // check if it exists
+ for _, j := range x {
+ if j == v {
+ exists = true
+ return
+ }
+ }
+ // try to replace a "deleted" slot
+ for i, j := range x {
+ if j == 0 {
+ x[i] = v
+ return
+ }
+ }
+ // if unable to replace deleted slot, just append it.
+ x = append(x, v)
+ *s = x
+ return
+}
+
+func (s *set) remove(v uintptr) (exists bool) {
+ // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }()
+ x := *s
+ if len(x) == 0 {
+ return
+ }
+ if len(x) == 1 {
+ if x[0] == v {
+ x[0] = 0
+ }
+ return
+ }
+ for i, j := range x {
+ if j == v {
+ exists = true
+ x[i] = 0 // set it to 0, as a way to delete it.
+ // copy(x[i:], x[i+1:])
+ // x = x[:len(x)-1]
+ return
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go
new file mode 100644
index 000000000..5d0727f77
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_internal.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to a different environment is easy (just update these functions).
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+func panicValToErr(panicVal interface{}, err *error) {
+ if panicVal == nil {
+ return
+ }
+ // case nil
+ switch xerr := panicVal.(type) {
+ case error:
+ *err = xerr
+ case string:
+ *err = errors.New(xerr)
+ default:
+ *err = fmt.Errorf("%v", panicVal)
+ }
+ return
+}
+
+func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
+ switch v.Kind() {
+ case reflect.Invalid:
+ return true
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if deref {
+ if v.IsNil() {
+ return true
+ }
+ return hIsEmptyValue(v.Elem(), deref, checkStruct)
+ } else {
+ return v.IsNil()
+ }
+ case reflect.Struct:
+ if !checkStruct {
+ return false
+ }
+ // return true if all fields are empty. else return false.
+ // we cannot use equality check, because some fields may be maps/slices/etc
+ // and consequently the structs are not comparable.
+ // return v.Interface() == reflect.Zero(v.Type()).Interface()
+ for i, n := 0, v.NumField(); i < n; i++ {
+ if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
+ return hIsEmptyValue(v, deref, checkStruct)
+}
+
+func pruneSignExt(v []byte, pos bool) (n int) {
+ if len(v) < 2 {
+ } else if pos && v[0] == 0 {
+ for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+ }
+ } else if !pos && v[0] == 0xff {
+ for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+ }
+ }
+ return
+}
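+
+// Worked example (values chosen for illustration): for a positive big-endian
+// integer, pruneSignExt([]byte{0x00, 0x00, 0x7f}, true) returns 2, as the two
+// leading zero bytes carry no information; likewise, for a negative one,
+// pruneSignExt([]byte{0xff, 0xff, 0x80}, false) also returns 2.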
+
+func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
+ if typ == nil {
+ return
+ }
+ rt := typ
+ // The type might be a pointer and we need to keep
+ // dereferencing to the base type until we find an implementation.
+ for {
+ if rt.Implements(iTyp) {
+ return true, indir
+ }
+ if p := rt; p.Kind() == reflect.Ptr {
+ indir++
+ if indir >= math.MaxInt8 { // insane number of indirections
+ return false, 0
+ }
+ rt = p.Elem()
+ continue
+ }
+ break
+ }
+ // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
+ if typ.Kind() != reflect.Ptr {
+ // Not a pointer, but does the pointer work?
+ if reflect.PtrTo(typ).Implements(iTyp) {
+ return true, -1
+ }
+ }
+ return false, 0
+}
+
+// halfFloatToFloatBits converts IEEE 754 half-precision bits to the equivalent float32 bits.
+// TODO: validate that this function is correct ...
+// culled from OGRE (Object-Oriented Graphics Rendering Engine)
+// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
+func halfFloatToFloatBits(yy uint16) (d uint32) {
+ y := uint32(yy)
+ s := (y >> 15) & 0x01
+ e := (y >> 10) & 0x1f
+ m := y & 0x03ff
+
+ if e == 0 {
+ if m == 0 { // plus or minus 0
+ return s << 31
+ } else { // Denormalized number -- renormalize it
+ for (m & 0x00000400) == 0 {
+ m <<= 1
+ e -= 1
+ }
+ e += 1
+ const zz uint32 = 0x0400
+ m &= ^zz
+ }
+ } else if e == 31 {
+ if m == 0 { // Inf
+ return (s << 31) | 0x7f800000
+ } else { // NaN
+ return (s << 31) | 0x7f800000 | (m << 13)
+ }
+ }
+ e = e + (127 - 15)
+ m = m << 13
+ return (s << 31) | (e << 23) | m
+}
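+
+// Worked example: the half-precision value 0x3c00 (sign 0, exponent 15,
+// mantissa 0) rebiases to exponent 15+(127-15) = 127, giving float32 bits
+// 0x3f800000, i.e. 1.0; similarly 0xc000 yields 0xc0000000, i.e. -2.0.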
+
+// growCap returns a new capacity for a slice, given the following:
+// - oldCap: current capacity
+// - unit: in-memory size of an element
+// - num: number of elements to add
+func growCap(oldCap, unit, num int) (newCap int) {
+ // appendslice logic (if cap < 1024, *2, else *1.25):
+ // leads to many copy calls, especially when copying bytes.
+ // bytes.Buffer model (2*cap + n): much better for bytes.
+ // smarter way is to take the byte-size of the appended element(type) into account
+
+ // maintain 3 thresholds:
+ // t1: if cap <= t1, newcap = 2x
+ // t2: if cap <= t2, newcap = 1.75x
+ // t3: if cap <= t3, newcap = 1.5x
+ // else newcap = 1.25x
+ //
+ // t1, t2, t3 >= 1024 always.
+ // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
+ //
+ // With this, appending for bytes increase by:
+ // 100% up to 4K
+ // 75% up to 8K
+ // 50% up to 16K
+ // 25% beyond that
+
+ // unit can be 0 e.g. for struct{}{}; handle that appropriately
+ var t1, t2, t3 int // thresholds
+ if unit <= 1 {
+ t1, t2, t3 = 4*1024, 8*1024, 16*1024
+ } else if unit < 16 {
+ t3 = 16 / unit * 1024
+ t1 = t3 * 1 / 4
+ t2 = t3 * 2 / 4
+ } else {
+ t1, t2, t3 = 1024, 1024, 1024
+ }
+
+ var x int // temporary variable
+
+ // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
+ if oldCap <= t1 { // [0,t1]
+ x = 8
+ } else if oldCap > t3 { // (t3,infinity]
+ x = 5
+ } else if oldCap <= t2 { // (t1,t2]
+ x = 7
+ } else { // (t2,t3]
+ x = 6
+ }
+ newCap = x * oldCap / 4
+
+ if num > 0 {
+ newCap += num
+ }
+
+ // ensure newCap is a multiple of 64 (if it is > 64) or 16.
+ if newCap > 64 {
+ if x = newCap % 64; x != 0 {
+ x = newCap / 64
+ newCap = 64 * (x + 1)
+ }
+ } else {
+ if x = newCap % 16; x != 0 {
+ x = newCap / 16
+ newCap = 16 * (x + 1)
+ }
+ }
+ return
+}
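+
+// Worked example (unit=1, i.e. bytes): growCap(100, 1, 10) doubles 100 to 200,
+// adds the 10 requested elements (210), then rounds up to the next multiple of
+// 64, returning 256. growCap(5000, 1, 0) falls in the (t1,t2] bucket, so it
+// grows by 75% to 8750 and rounds up to 8768.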
+
+func expandSliceValue(s reflect.Value, num int) reflect.Value {
+ if num <= 0 {
+ return s
+ }
+ l0 := s.Len()
+ l1 := l0 + num // new slice length
+ if l1 < l0 {
+ panic("ExpandSlice: slice overflow")
+ }
+ c0 := s.Cap()
+ if l1 <= c0 {
+ return s.Slice(0, l1)
+ }
+ st := s.Type()
+ c1 := growCap(c0, int(st.Elem().Size()), num)
+ s2 := reflect.MakeSlice(st, l1, c1)
+ // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1)
+ reflect.Copy(s2, s)
+ return s2
+}
diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 000000000..8b06a0045
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,20 @@
+// +build !unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func stringView(v []byte) string {
+ return string(v)
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func bytesView(v string) []byte {
+ return []byte(v)
+}
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 000000000..0f596c71a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,49 @@
+// +build unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "unsafe"
+)
+
+// This file has unsafe variants of some helper methods.
+
+type unsafeString struct {
+ Data uintptr
+ Len int
+}
+
+type unsafeSlice struct {
+ Data uintptr
+ Len int
+ Cap int
+}
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func stringView(v []byte) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ bx := (*unsafeSlice)(unsafe.Pointer(&v))
+ sx := unsafeString{bx.Data, bx.Len}
+ return *(*string)(unsafe.Pointer(&sx))
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func bytesView(v string) []byte {
+ if len(v) == 0 {
+ return zeroByteSlice
+ }
+
+ sx := (*unsafeString)(unsafe.Pointer(&v))
+ bx := unsafeSlice{sx.Data, sx.Len, sx.Len}
+ return *(*[]byte)(unsafe.Pointer(&bx))
+}
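+
+// Note the aliasing consequence (the deliberate trade-off of the unsafe build):
+// the returned values share memory with their inputs, so e.g.
+//
+//   b := []byte("abc")
+//   s := stringView(b)
+//   b[0] = 'x' // s now reads "xbc" under this build
+//
+// which is why a viewed buffer must not be mutated while the view is in use.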
diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 000000000..5bb389628
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1234 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read arbitrary binary data in json (only unicode text).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+// We implement it here.
+// - Also, strconv.ParseXXX for floats and integers
+// - only works on strings resulting in unnecessary allocation and []byte-string conversion.
+// - it does a lot of redundant checks, because json numbers are simpler than what it supports.
+// - We parse numbers (floats and integers) directly here.
+// We only delegate parsing floats if it is a hairy float which could cause a loss of precision.
+// In that case, we delegate to strconv.ParseFloat.
+//
+// Note:
+// - encode does not beautify. There is no whitespace when encoding.
+// - rpc calls which take single integer arguments or write single numeric arguments will need care.
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
+// MUST not call one another.
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+//--------------------------------
+
+var (
+ jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'}
+
+ jsonFloat64Pow10 = [...]float64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22,
+ }
+
+ jsonUint64Pow10 = [...]uint64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ }
+
+ // jsonTabs and jsonSpaces are used as caches for indents
+ jsonTabs, jsonSpaces string
+)
+
+const (
+ // jsonUnreadAfterDecNum controls whether we unread after decoding a number.
+ //
+ // Instead of unreading, we could just update d.tok (iff it's not a whitespace char).
+ // However, doing that means we may HOLD onto some data which belongs to another stream.
+ // Thus, it is safest to unread the data when done.
+ // Keep this behind a constant flag for now.
+ jsonUnreadAfterDecNum = true
+
+ // If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
+ // - If we see first character of null, false or true,
+ // do not validate subsequent characters.
+ // - e.g. if we see a n, assume null and skip next 3 characters,
+ // and do not validate they are ull.
+ // P.S. Do not expect a significant decoding boost from this.
+ jsonValidateSymbols = true
+
+ // if jsonTruncateMantissa, truncate the mantissa if it has trailing 0's.
+ // This is important because it could allow some floats to be decoded without
+ // deferring to strconv.ParseFloat.
+ jsonTruncateMantissa = true
+
+ // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow
+ jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+
+ // if mantissa >= jsonNumUintMaxVal, this is an overflow
+ jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+ // jsonNumDigitsUint64Largest = 19
+
+ jsonSpacesOrTabsLen = 128
+)
+
+func init() {
+ var bs [jsonSpacesOrTabsLen]byte
+ for i := 0; i < jsonSpacesOrTabsLen; i++ {
+ bs[i] = ' '
+ }
+ jsonSpaces = string(bs[:])
+
+ for i := 0; i < jsonSpacesOrTabsLen; i++ {
+ bs[i] = '\t'
+ }
+ jsonTabs = string(bs[:])
+}
+
+type jsonEncDriver struct {
+ e *Encoder
+ w encWriter
+ h *JsonHandle
+ b [64]byte // scratch
+ bs []byte // scratch
+ se setExtWrapper
+ ds string // indent string
+ dl uint16 // indent level
+ dt bool // indent using tabs
+ d bool // indent
+ c containerState
+ noBuiltInTypes
+}
+
+// indent is done as below:
+// - newline and indent are added before each mapKey or arrayElem
+// - newline and indent are added before each ending,
+// except when there was no entry (so we can have {} or [])
+
+func (e *jsonEncDriver) sendContainerState(c containerState) {
+ // determine whether to output separators
+ if c == containerMapKey {
+ if e.c != containerMapStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ } else if c == containerMapValue {
+ if e.d {
+ e.w.writen2(':', ' ')
+ } else {
+ e.w.writen1(':')
+ }
+ } else if c == containerMapEnd {
+ if e.d {
+ e.dl--
+ if e.c != containerMapStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1('}')
+ } else if c == containerArrayElem {
+ if e.c != containerArrayStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ } else if c == containerArrayEnd {
+ if e.d {
+ e.dl--
+ if e.c != containerArrayStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1(']')
+ }
+ e.c = c
+}
+
+func (e *jsonEncDriver) writeIndent() {
+ e.w.writen1('\n')
+ if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen {
+ if e.dt {
+ e.w.writestr(jsonTabs[:x])
+ } else {
+ e.w.writestr(jsonSpaces[:x])
+ }
+ } else {
+ for i := uint16(0); i < e.dl; i++ {
+ e.w.writestr(e.ds)
+ }
+ }
+}
+
+func (e *jsonEncDriver) EncodeNil() {
+ e.w.writeb(jsonLiterals[9:13]) // null
+}
+
+func (e *jsonEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writeb(jsonLiterals[0:4]) // true
+ } else {
+ e.w.writeb(jsonLiterals[4:9]) // false
+ }
+}
+
+func (e *jsonEncDriver) EncodeFloat32(f float32) {
+ e.encodeFloat(float64(f), 32)
+}
+
+func (e *jsonEncDriver) EncodeFloat64(f float64) {
+ // e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
+ e.encodeFloat(f, 64)
+}
+
+func (e *jsonEncDriver) encodeFloat(f float64, numbits int) {
+ x := strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits)
+ e.w.writeb(x)
+ if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 {
+ e.w.writen2('.', '0')
+ }
+}
+
+func (e *jsonEncDriver) EncodeInt(v int64) {
+ if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) {
+ e.w.writen1('"')
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+ e.w.writen1('"')
+ return
+ }
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) EncodeUint(v uint64) {
+ if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 {
+ e.w.writen1('"')
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+ e.w.writen1('"')
+ return
+ }
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
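+
+// For illustration: with h.IntegerAsString == 'A', EncodeInt(123) emits the
+// quoted string "123"; with 'L', only values whose magnitude exceeds 1<<53
+// (beyond float64-safe integers) are quoted, so 123 stays unquoted.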
+
+func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ if v := ext.ConvertExt(rv); v == nil {
+ e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ // only encodes re.Value (never re.Data)
+ if re.Value == nil {
+ e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+ } else {
+ en.encode(re.Value)
+ }
+}
+
+func (e *jsonEncDriver) EncodeArrayStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('[')
+ e.c = containerArrayStart
+}
+
+func (e *jsonEncDriver) EncodeMapStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('{')
+ e.c = containerMapStart
+}
+
+func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
+ // e.w.writestr(strconv.Quote(v))
+ e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeSymbol(v string) {
+ // e.EncodeString(c_UTF8, v)
+ e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ // if encoding raw bytes and RawBytesExt is configured, use it to encode
+ if c == c_RAW && e.se.i != nil {
+ e.EncodeExt(v, 0, &e.se, e.e)
+ return
+ }
+ if c == c_RAW {
+ slen := base64.StdEncoding.EncodedLen(len(v))
+ if cap(e.bs) >= slen {
+ e.bs = e.bs[:slen]
+ } else {
+ e.bs = make([]byte, slen)
+ }
+ base64.StdEncoding.Encode(e.bs, v)
+ e.w.writen1('"')
+ e.w.writeb(e.bs)
+ e.w.writen1('"')
+ } else {
+ // e.EncodeString(c, string(v))
+ e.quoteStr(stringView(v))
+ }
+}
+
+func (e *jsonEncDriver) EncodeAsis(v []byte) {
+ e.w.writeb(v)
+}
+
+func (e *jsonEncDriver) quoteStr(s string) {
+ // adapted from std pkg encoding/json
+ const hex = "0123456789abcdef"
+ w := e.w
+ w.writen1('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ w.writen2('\\', b)
+ case '\n':
+ w.writen2('\\', 'n')
+ case '\r':
+ w.writen2('\\', 'r')
+ case '\b':
+ w.writen2('\\', 'b')
+ case '\f':
+ w.writen2('\\', 'f')
+ case '\t':
+ w.writen2('\\', 't')
+ default:
+ // encode all bytes < 0x20 (except \r, \n).
+ // also encode < > & to prevent security holes when served to some browsers.
+ w.writestr(`\u00`)
+ w.writen2(hex[b>>4], hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+ // Both technically valid JSON, but bomb on JSONP, so fix here.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\u202`)
+ w.writen1(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ w.writestr(s[start:])
+ }
+ w.writen1('"')
+}
+
+//--------------------------------
+
+type jsonNum struct {
+ // bytes []byte // may have [+-.eE0-9]
+ mantissa uint64 // the mantissa digits, accumulated into a uint64.
+ exponent int16 // exponent value.
+ manOverflow bool
+ neg bool // started with -. No initial sign in the bytes above.
+ dot bool // has dot
+ explicitExponent bool // explicit exponent
+}
+
+func (x *jsonNum) reset() {
+ x.manOverflow = false
+ x.neg = false
+ x.dot = false
+ x.explicitExponent = false
+ x.mantissa = 0
+ x.exponent = 0
+}
+
+// uintExp is called only if exponent > 0.
+func (x *jsonNum) uintExp() (n uint64, overflow bool) {
+ n = x.mantissa
+ e := x.exponent
+ if e >= int16(len(jsonUint64Pow10)) {
+ overflow = true
+ return
+ }
+ n *= jsonUint64Pow10[e]
+ if n < x.mantissa || n > jsonNumUintMaxVal {
+ overflow = true
+ return
+ }
+ return
+ // for i := int16(0); i < e; i++ {
+ // if n >= jsonNumUintCutoff {
+ // overflow = true
+ // return
+ // }
+ // n *= 10
+ // }
+ // return
+}
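+
+// Worked example: for the stream text "12e2", mantissa=12 and exponent=2, so
+// uintExp returns 12 * 1e2 = 1200 with no overflow; an exponent >= 20 (beyond
+// jsonUint64Pow10) reports overflow immediately.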
+
+// these constants are only used within floatVal.
+// They are brought out, so that floatVal can be inlined.
+const (
+ jsonUint64MantissaBits = 52
+ jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1
+)
+
+func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) {
+ // We do not want to lose precision.
+ // Consequently, we will delegate to strconv.ParseFloat if any of the following happen:
+ // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits)
+ // We expect up to 99.... (19 digits)
+// - The mantissa cannot fit into 52 bits of a uint64
+// - The exponent is beyond our scope, i.e. beyond 22.
+ parseUsingStrConv = x.manOverflow ||
+ x.exponent > jsonMaxExponent ||
+ (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) ||
+ x.mantissa>>jsonUint64MantissaBits != 0
+
+ if parseUsingStrConv {
+ return
+ }
+
+ // all good. so handle parse here.
+ f = float64(x.mantissa)
+ // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f)
+ if x.neg {
+ f = -f
+ }
+ if x.exponent > 0 {
+ f *= jsonFloat64Pow10[x.exponent]
+ } else if x.exponent < 0 {
+ f /= jsonFloat64Pow10[-x.exponent]
+ }
+ return
+}
+
+type jsonDecDriver struct {
+ noBuiltInTypes
+ d *Decoder
+ h *JsonHandle
+ r decReader
+
+ c containerState
+ // tok is used to store the token read right after skipWhiteSpace.
+ tok uint8
+
+ bstr [8]byte // scratch used for string \uXXXX parsing
+ b [64]byte // scratch, used for parsing strings or numbers
+ b2 [64]byte // scratch, used only for decodeBytes (after base64)
+ bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
+
+ se setExtWrapper
+
+ n jsonNum
+}
+
+func jsonIsWS(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\r' || b == '\n'
+}
+
+// // This will skip whitespace characters and return the next byte to read.
+// // The next byte determines what kind of value follows.
+// func (d *jsonDecDriver) skipWhitespace() {
+// // fast-path: do not enter loop. Just check first (in case no whitespace).
+// b := d.r.readn1()
+// if jsonIsWS(b) {
+// r := d.r
+// for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+// }
+// }
+// d.tok = b
+// }
+
+func (d *jsonDecDriver) uncacheRead() {
+ if d.tok != 0 {
+ d.r.unreadn1()
+ d.tok = 0
+ }
+}
+
+func (d *jsonDecDriver) sendContainerState(c containerState) {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ var xc uint8 // char expected
+ if c == containerMapKey {
+ if d.c != containerMapStart {
+ xc = ','
+ }
+ } else if c == containerMapValue {
+ xc = ':'
+ } else if c == containerMapEnd {
+ xc = '}'
+ } else if c == containerArrayElem {
+ if d.c != containerArrayStart {
+ xc = ','
+ }
+ } else if c == containerArrayEnd {
+ xc = ']'
+ }
+ if xc != 0 {
+ if d.tok != xc {
+ d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ }
+ d.c = c
+}
+
+func (d *jsonDecDriver) CheckBreak() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == '}' || d.tok == ']' {
+ // d.tok = 0 // only checking, not consuming
+ return true
+ }
+ return false
+}
+
+func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
+ bs := d.r.readx(int(toIdx - fromIdx))
+ d.tok = 0
+ if jsonValidateSymbols {
+ if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
+ d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
+ return
+ }
+ }
+}
+
+func (d *jsonDecDriver) TryDecodeAsNil() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == 'n' {
+ d.readStrIdx(10, 13) // ull
+ return true
+ }
+ return false
+}
+
+func (d *jsonDecDriver) DecodeBool() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == 'f' {
+ d.readStrIdx(5, 9) // alse
+ return false
+ }
+ if d.tok == 't' {
+ d.readStrIdx(1, 4) // rue
+ return true
+ }
+ d.d.errorf("json: decode bool: got first char %c", d.tok)
+ return false // "unreachable"
+}
+
+func (d *jsonDecDriver) ReadMapStart() int {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '{' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok)
+ }
+ d.tok = 0
+ d.c = containerMapStart
+ return -1
+}
+
+func (d *jsonDecDriver) ReadArrayStart() int {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '[' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok)
+ }
+ d.tok = 0
+ d.c = containerArrayStart
+ return -1
+}
+
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
+ // check container type by checking the first char
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if b := d.tok; b == '{' {
+ return valueTypeMap
+ } else if b == '[' {
+ return valueTypeArray
+ } else if b == 'n' {
+ return valueTypeNil
+ } else if b == '"' {
+ return valueTypeString
+ }
+ return valueTypeUnset
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // return false // "unreachable"
+}
+
+func (d *jsonDecDriver) decNum(storeBytes bool) {
+ // If it has a . or an e|E, decode as a float; else decode as an int.
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ b := d.tok
+ var str bool
+ if b == '"' {
+ str = true
+ b = d.r.readn1()
+ }
+ if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
+ d.d.errorf("json: decNum: got first char '%c'", b)
+ return
+ }
+ d.tok = 0
+
+ const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+ const jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+ n := &d.n
+ r := d.r
+ n.reset()
+ d.bs = d.bs[:0]
+
+ if str && storeBytes {
+ d.bs = append(d.bs, '"')
+ }
+
+ // The format of a number is as below:
+ // parsing: sign? digit* dot? digit* e? sign? digit*
+ // states: 0 1* 2 3* 4 5* 6 7
+ // We track this state so we can break out of the loop correctly.
+ var state uint8 = 0
+ var eNeg bool
+ var e int16
+ var eof bool
+LOOP:
+ for !eof {
+ // fmt.Printf("LOOP: b: %q\n", b)
+ switch b {
+ case '+':
+ switch state {
+ case 0:
+ state = 2
+ // do not add sign to the slice ...
+ b, eof = r.readn1eof()
+ continue
+ case 6: // typ = jsonNumFloat
+ state = 7
+ default:
+ break LOOP
+ }
+ case '-':
+ switch state {
+ case 0:
+ state = 2
+ n.neg = true
+ // do not add sign to the slice ...
+ b, eof = r.readn1eof()
+ continue
+ case 6: // typ = jsonNumFloat
+ eNeg = true
+ state = 7
+ default:
+ break LOOP
+ }
+ case '.':
+ switch state {
+ case 0, 2: // typ = jsonNumFloat
+ state = 4
+ n.dot = true
+ default:
+ break LOOP
+ }
+ case 'e', 'E':
+ switch state {
+ case 0, 2, 4: // typ = jsonNumFloat
+ state = 6
+ // n.mantissaEndIndex = int16(len(n.bytes))
+ n.explicitExponent = true
+ default:
+ break LOOP
+ }
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ switch state {
+ case 0:
+ state = 2
+ fallthrough
+ case 2:
+ fallthrough
+ case 4:
+ if n.dot {
+ n.exponent--
+ }
+ if n.mantissa >= jsonNumUintCutoff {
+ n.manOverflow = true
+ break
+ }
+ v := uint64(b - '0')
+ n.mantissa *= 10
+ if v != 0 {
+ n1 := n.mantissa + v
+ if n1 < n.mantissa || n1 > jsonNumUintMaxVal {
+ n.manOverflow = true // n+v overflows
+ break
+ }
+ n.mantissa = n1
+ }
+ case 6:
+ state = 7
+ fallthrough
+ case 7:
+ if !(b == '0' && e == 0) {
+ e = e*10 + int16(b-'0')
+ }
+ default:
+ break LOOP
+ }
+ case '"':
+ if str {
+ if storeBytes {
+ d.bs = append(d.bs, '"')
+ }
+ b, eof = r.readn1eof()
+ }
+ break LOOP
+ default:
+ break LOOP
+ }
+ if storeBytes {
+ d.bs = append(d.bs, b)
+ }
+ b, eof = r.readn1eof()
+ }
+
+ if jsonTruncateMantissa && n.mantissa != 0 {
+ for n.mantissa%10 == 0 {
+ n.mantissa /= 10
+ n.exponent++
+ }
+ }
+
+ if e != 0 {
+ if eNeg {
+ n.exponent -= e
+ } else {
+ n.exponent += e
+ }
+ }
+
+ // d.n = n
+
+ if !eof {
+ if jsonUnreadAfterDecNum {
+ r.unreadn1()
+ } else {
+ if !jsonIsWS(b) {
+ d.tok = b
+ }
+ }
+ }
+ // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n",
+ // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex)
+ return
+}
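+
+// State trace (sketch) for the input -12.5e3 through the loop above:
+//	'-'  state 0 -> 2, neg=true
+//	'1' '2'  state 2, mantissa=12
+//	'.'  state 2 -> 4, dot=true
+//	'5'  state 4, mantissa=125, exponent=-1
+//	'e'  state 4 -> 6, explicitExponent=true
+//	'3'  state 6 -> 7, e=3
+// leaving mantissa=125 and exponent=-1+3=2 for DecodeInt/floatVal.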
+
+func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ d.decNum(false)
+ n := &d.n
+ if n.manOverflow {
+ d.d.errorf("json: overflow integer after: %v", n.mantissa)
+ return
+ }
+ var u uint64
+ if n.exponent == 0 {
+ u = n.mantissa
+ } else if n.exponent < 0 {
+ d.d.errorf("json: fractional integer")
+ return
+ } else if n.exponent > 0 {
+ var overflow bool
+ if u, overflow = n.uintExp(); overflow {
+ d.d.errorf("json: overflow integer")
+ return
+ }
+ }
+ i = int64(u)
+ if n.neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+ return
+ }
+ // fmt.Printf("DecodeInt: %v\n", i)
+ return
+}
+
+// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number
+func (d *jsonDecDriver) floatVal() (f float64) {
+ f, useStrConv := d.n.floatVal()
+ if useStrConv {
+ var err error
+ if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil {
+ panic(fmt.Errorf("parse float: %s, %v", d.bs, err))
+ }
+ if d.n.neg {
+ f = -f
+ }
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
+ d.decNum(false)
+ n := &d.n
+ if n.neg {
+ d.d.errorf("json: unsigned integer cannot be negative")
+ return
+ }
+ if n.manOverflow {
+ d.d.errorf("json: overflow integer after: %v", n.mantissa)
+ return
+ }
+ if n.exponent == 0 {
+ u = n.mantissa
+ } else if n.exponent < 0 {
+ d.d.errorf("json: fractional integer")
+ return
+ } else if n.exponent > 0 {
+ var overflow bool
+ if u, overflow = n.uintExp(); overflow {
+ d.d.errorf("json: overflow integer")
+ return
+ }
+ }
+ if chkOvf.Uint(u, bitsize) {
+ d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+ return
+ }
+ // fmt.Printf("DecodeUint: %v\n", u)
+ return
+}
+
+func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ d.decNum(true)
+ f = d.floatVal()
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("json: overflow float32: %v, %s", f, d.bs)
+ return
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = xtag
+ d.d.decode(&re.Value)
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
+ if !isstring && d.se.i != nil {
+ bsOut = bs
+ d.DecodeExt(&bsOut, 0, &d.se)
+ return
+ }
+ d.appendStringAsBytes()
+ // if isstring, then just return the bytes, even if it is using the scratch buffer.
+ // the bytes will be converted to a string as needed.
+ if isstring {
+ return d.bs
+ }
+ // if appendStringAsBytes returned a zero-len slice, then treat as nil.
+ // This should only happen for null, and "".
+ if len(d.bs) == 0 {
+ return nil
+ }
+ bs0 := d.bs
+ slen := base64.StdEncoding.DecodedLen(len(bs0))
+ if slen <= cap(bs) {
+ bsOut = bs[:slen]
+ } else if zerocopy && slen <= cap(d.b2) {
+ bsOut = d.b2[:slen]
+ } else {
+ bsOut = make([]byte, slen)
+ }
+ slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
+ if err != nil {
+ d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err)
+ return nil
+ }
+ if slen != slen2 {
+ bsOut = bsOut[:slen2]
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeString() (s string) {
+ d.appendStringAsBytes()
+ // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+ if d.c == containerMapKey {
+ return d.d.string(d.bs)
+ }
+ return string(d.bs)
+}
+
+func (d *jsonDecDriver) appendStringAsBytes() {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+
+ // handle null as a string
+ if d.tok == 'n' {
+ d.readStrIdx(10, 13) // ull
+ d.bs = d.bs[:0]
+ return
+ }
+
+ if d.tok != '"' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
+ }
+ d.tok = 0
+
+ v := d.bs[:0]
+ var c uint8
+ r := d.r
+ for {
+ c = r.readn1()
+ if c == '"' {
+ break
+ } else if c == '\\' {
+ c = r.readn1()
+ switch c {
+ case '"', '\\', '/', '\'':
+ v = append(v, c)
+ case 'b':
+ v = append(v, '\b')
+ case 'f':
+ v = append(v, '\f')
+ case 'n':
+ v = append(v, '\n')
+ case 'r':
+ v = append(v, '\r')
+ case 't':
+ v = append(v, '\t')
+ case 'u':
+ rr := d.jsonU4(false)
+ // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
+ if utf16.IsSurrogate(rr) {
+ rr = utf16.DecodeRune(rr, d.jsonU4(true))
+ }
+ w2 := utf8.EncodeRune(d.bstr[:], rr)
+ v = append(v, d.bstr[:w2]...)
+ default:
+ d.d.errorf("json: unsupported escaped value: %c", c)
+ }
+ } else {
+ v = append(v, c)
+ }
+ }
+ d.bs = v
+}
+
+func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
+ r := d.r
+ if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') {
+ d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
+ return 0
+ }
+ // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64)
+ var u uint32
+ for i := 0; i < 4; i++ {
+ v := r.readn1()
+ if '0' <= v && v <= '9' {
+ v = v - '0'
+ } else if 'a' <= v && v <= 'f' { // only a..f are valid hex digits
+ v = v - 'a' + 10
+ } else if 'A' <= v && v <= 'F' {
+ v = v - 'A' + 10
+ } else {
+ d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v)
+ return 0
+ }
+ u = u*16 + uint32(v)
+ }
+ return rune(u)
+}
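+
+// For example: the escape \u00e9 yields rune U+00E9 ('é'). Surrogate pairs
+// such as \ud83d\ude00 are not combined here; the caller (appendStringAsBytes)
+// detects the surrogate and calls jsonU4 again with checkSlashU=true before
+// combining via utf16.DecodeRune.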
+
+func (d *jsonDecDriver) DecodeNaked() {
+ z := &d.d.n
+ // var decodeFurther bool
+
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ switch d.tok {
+ case 'n':
+ d.readStrIdx(10, 13) // ull
+ z.v = valueTypeNil
+ case 'f':
+ d.readStrIdx(5, 9) // alse
+ z.v = valueTypeBool
+ z.b = false
+ case 't':
+ d.readStrIdx(1, 4) // rue
+ z.v = valueTypeBool
+ z.b = true
+ case '{':
+ z.v = valueTypeMap
+ // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart
+ // decodeFurther = true
+ case '[':
+ z.v = valueTypeArray
+ // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart
+ // decodeFurther = true
+ case '"':
+ z.v = valueTypeString
+ z.s = d.DecodeString()
+ default: // number
+ d.decNum(true)
+ n := &d.n
+ // if the string had any of [.eE], then decode as float.
+ switch {
+ case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow:
+ z.v = valueTypeFloat
+ z.f = d.floatVal()
+ case n.exponent == 0:
+ u := n.mantissa
+ switch {
+ case n.neg:
+ z.v = valueTypeInt
+ z.i = -int64(u)
+ case d.h.SignedInteger:
+ z.v = valueTypeInt
+ z.i = int64(u)
+ default:
+ z.v = valueTypeUint
+ z.u = u
+ }
+ default:
+ u, overflow := n.uintExp()
+ switch {
+ case overflow:
+ z.v = valueTypeFloat
+ z.f = d.floatVal()
+ case n.neg:
+ z.v = valueTypeInt
+ z.i = -int64(u)
+ case d.h.SignedInteger:
+ z.v = valueTypeInt
+ z.i = int64(u)
+ default:
+ z.v = valueTypeUint
+ z.u = u
+ }
+ }
+ // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v)
+ }
+ // if decodeFurther {
+ // d.s.sc.retryRead()
+ // }
+ return
+}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+// - decodes numbers into interface{} as int, uint or float64
+// - configurable way to encode/decode []byte;
+// by default, encodes and decodes []byte using base64 Std Encoding
+// - UTF-8 support for encoding and decoding
+//
+// It has better performance than the json library in the standard library,
+// by leveraging the performance improvements of the codec library and
+// minimizing allocations.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+type JsonHandle struct {
+ textEncodingType
+ BasicHandle
+ // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
+ // If not configured, raw bytes are encoded to/from base64 text.
+ RawBytesExt InterfaceExt
+
+ // Indent indicates how a value is encoded.
+ // - If positive, indent by that number of spaces.
+ // - If negative, indent by that number of tabs.
+ Indent int8
+
+ // IntegerAsString controls how integers (signed and unsigned) are encoded.
+ //
+ // Per the JSON spec as commonly implemented (e.g. by JavaScript engines),
+ // JSON numbers are 64-bit floating point numbers.
+ // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+ // This can be mitigated by configuring how to encode integers.
+ //
+ // IntegerAsString interprets the following values:
+ // - if 'L', then encode integers > 2^53 as a json string.
+ // - if 'A', then encode all integers as a json string
+ // containing the exact integer representation as a decimal.
+ // - else encode all integers as a json number (default)
+ IntegerAsString uint8
+}
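+
+// Usage sketch (assuming the NewEncoderBytes/NewDecoderBytes constructors
+// defined elsewhere in this package):
+//
+//	var (
+//		jh  JsonHandle
+//		out []byte
+//	)
+//	jh.Indent = 2            // pretty-print with 2 spaces
+//	jh.IntegerAsString = 'L' // integers > 2^53 become JSON strings
+//	if err := NewEncoderBytes(&out, &jh).Encode(map[string]uint64{"n": 1 << 60}); err != nil {
+//		// handle error
+//	}
+//	var v interface{}
+//	if err := NewDecoderBytes(out, &jh).Decode(&v); err != nil {
+//		// handle error
+//	}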
+
+func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
+
+func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
+ hd := jsonEncDriver{e: e, h: h}
+ hd.bs = hd.b[:0]
+
+ hd.reset()
+
+ return &hd
+}
+
+func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
+ // d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
+ hd := jsonDecDriver{d: d, h: h}
+ hd.bs = hd.b[:0]
+ hd.reset()
+ return &hd
+}
+
+func (e *jsonEncDriver) reset() {
+ e.w = e.e.w
+ e.se.i = e.h.RawBytesExt
+ if e.bs != nil {
+ e.bs = e.bs[:0]
+ }
+ e.d, e.dt, e.dl, e.ds = false, false, 0, ""
+ e.c = 0
+ if e.h.Indent > 0 {
+ e.d = true
+ e.ds = jsonSpaces[:e.h.Indent]
+ } else if e.h.Indent < 0 {
+ e.d = true
+ e.dt = true
+ e.ds = jsonTabs[:-(e.h.Indent)]
+ }
+}
+
+func (d *jsonDecDriver) reset() {
+ d.r = d.d.r
+ d.se.i = d.h.RawBytesExt
+ if d.bs != nil {
+ d.bs = d.bs[:0]
+ }
+ d.c, d.tok = 0, 0
+ d.n.reset()
+}
+
+var jsonEncodeTerminate = []byte{' '}
+
+func (h *JsonHandle) rpcEncodeTerminate() []byte {
+ return jsonEncodeTerminate
+}
+
+var _ decDriver = (*jsonDecDriver)(nil)
+var _ encDriver = (*jsonEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go
new file mode 100644
index 000000000..e09d5a04a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/msgpack.go
@@ -0,0 +1,852 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+MSGPACK
+
+The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
+We need to maintain compatibility with it and how it encodes integer values
+without caring about the type.
+
+For compatibility with behaviour of msgpack-c reference implementation:
+ - Go intX (>0) and uintX
+ IS ENCODED AS
+ msgpack +ve fixnum, unsigned
+ - Go intX (<0)
+ IS ENCODED AS
+ msgpack -ve fixnum, signed
+
+*/
+package codec
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "net/rpc"
+ "reflect"
+)
+
+const (
+ mpPosFixNumMin byte = 0x00
+ mpPosFixNumMax = 0x7f
+ mpFixMapMin = 0x80
+ mpFixMapMax = 0x8f
+ mpFixArrayMin = 0x90
+ mpFixArrayMax = 0x9f
+ mpFixStrMin = 0xa0
+ mpFixStrMax = 0xbf
+ mpNil = 0xc0
+ _ = 0xc1
+ mpFalse = 0xc2
+ mpTrue = 0xc3
+ mpFloat = 0xca
+ mpDouble = 0xcb
+ mpUint8 = 0xcc
+ mpUint16 = 0xcd
+ mpUint32 = 0xce
+ mpUint64 = 0xcf
+ mpInt8 = 0xd0
+ mpInt16 = 0xd1
+ mpInt32 = 0xd2
+ mpInt64 = 0xd3
+
+ // extensions below
+ mpBin8 = 0xc4
+ mpBin16 = 0xc5
+ mpBin32 = 0xc6
+ mpExt8 = 0xc7
+ mpExt16 = 0xc8
+ mpExt32 = 0xc9
+ mpFixExt1 = 0xd4
+ mpFixExt2 = 0xd5
+ mpFixExt4 = 0xd6
+ mpFixExt8 = 0xd7
+ mpFixExt16 = 0xd8
+
+ mpStr8 = 0xd9 // new
+ mpStr16 = 0xda
+ mpStr32 = 0xdb
+
+ mpArray16 = 0xdc
+ mpArray32 = 0xdd
+
+ mpMap16 = 0xde
+ mpMap32 = 0xdf
+
+ mpNegFixNumMin = 0xe0
+ mpNegFixNumMax = 0xff
+)
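+
+// Reading the descriptor table above: bytes 0x00..0x7f are themselves the
+// positive fixnums 0..127, and 0xe0..0xff are the negative fixnums -32..-1
+// (the byte reinterpreted as int8). So small integers cost a single byte,
+// while e.g. mpUint16 (0xcd) is followed by a 2-byte big-endian payload.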
+
+// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
+// that the backend RPC service takes multiple arguments, which have been arranged
+// in sequence in the slice.
+//
+// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
+// array of 1 element).
+type MsgpackSpecRpcMultiArgs []interface{}
+
+// msgpackContainerType specifies the encoding parameters for each msgpack container family (str, bin, array, map).
+type msgpackContainerType struct {
+ fixCutoff int
+ bFixMin, b8, b16, b32 byte
+ hasFixMin, has8, has8Always bool
+}
+
+var (
+ msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
+ msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
+ msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
+ msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
+)
+
+//---------------------------------------------
+
+type msgpackEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ w encWriter
+ h *MsgpackHandle
+ x [8]byte
+}
+
+func (e *msgpackEncDriver) EncodeNil() {
+ e.w.writen1(mpNil)
+}
+
+func (e *msgpackEncDriver) EncodeInt(i int64) {
+ if i >= 0 {
+ e.EncodeUint(uint64(i))
+ } else if i >= -32 {
+ e.w.writen1(byte(i))
+ } else if i >= math.MinInt8 {
+ e.w.writen2(mpInt8, byte(i))
+ } else if i >= math.MinInt16 {
+ e.w.writen1(mpInt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i >= math.MinInt32 {
+ e.w.writen1(mpInt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpInt64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
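+
+// Byte-level examples of the branches above (sketch):
+//	EncodeInt(5)     -> 0x05            (positive fixnum, via EncodeUint)
+//	EncodeInt(-5)    -> 0xfb            (negative fixnum: byte(-5))
+//	EncodeInt(-200)  -> 0xd1 0xff 0x38  (mpInt16, big-endian -200)
+//	EncodeInt(-2^40) -> 0xd3 + 8 bytes  (mpInt64)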
+
+func (e *msgpackEncDriver) EncodeUint(i uint64) {
+ if i <= math.MaxInt8 {
+ e.w.writen1(byte(i))
+ } else if i <= math.MaxUint8 {
+ e.w.writen2(mpUint8, byte(i))
+ } else if i <= math.MaxUint16 {
+ e.w.writen1(mpUint16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i <= math.MaxUint32 {
+ e.w.writen1(mpUint32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpUint64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(mpTrue)
+ } else {
+ e.w.writen1(mpFalse)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(mpFloat)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(mpDouble)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(v)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ if e.h.WriteExt {
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+ } else {
+ e.EncodeStringBytes(c_RAW, bs)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
+ if l == 1 {
+ e.w.writen2(mpFixExt1, xtag)
+ } else if l == 2 {
+ e.w.writen2(mpFixExt2, xtag)
+ } else if l == 4 {
+ e.w.writen2(mpFixExt4, xtag)
+ } else if l == 8 {
+ e.w.writen2(mpFixExt8, xtag)
+ } else if l == 16 {
+ e.w.writen2(mpFixExt16, xtag)
+ } else if l < 256 {
+ e.w.writen2(mpExt8, byte(l))
+ e.w.writen1(xtag)
+ } else if l < 65536 {
+ e.w.writen1(mpExt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ e.w.writen1(xtag)
+ } else {
+ e.w.writen1(mpExt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ e.w.writen1(xtag)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeArrayStart(length int) {
+ e.writeContainerLen(msgpackContainerList, length)
+}
+
+func (e *msgpackEncDriver) EncodeMapStart(length int) {
+ e.writeContainerLen(msgpackContainerMap, length)
+}
+
+func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(s))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(s))
+ }
+ if len(s) > 0 {
+ e.w.writestr(s)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(bs))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(bs))
+ }
+ if len(bs) > 0 {
+ e.w.writeb(bs)
+ }
+}
+
+func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
+ if ct.hasFixMin && l < ct.fixCutoff {
+ e.w.writen1(ct.bFixMin | byte(l))
+ } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
+ e.w.writen2(ct.b8, uint8(l))
+ } else if l < 65536 {
+ e.w.writen1(ct.b16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ } else {
+ e.w.writen1(ct.b32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ }
+}
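+
+// Examples of the length encoding above (sketch), for msgpackContainerStr
+// (fixCutoff=32, hasFixMin=true, has8=true, has8Always=false):
+//	l=5             -> 0xa5 (mpFixStrMin | 5)
+//	l=40, WriteExt  -> 0xd9 0x28 (mpStr8; only emitted under the new spec)
+//	l=40, !WriteExt -> 0xda 0x00 0x28 (mpStr16)
+// Binary containers (has8Always=true) always use mpBin8 for l < 256.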
+
+//---------------------------------------------
+
+type msgpackDecDriver struct {
+ d *Decoder
+ r decReader // *Decoder decReader decReaderT
+ h *MsgpackHandle
+ b [scratchByteArrayLen]byte
+ bd byte
+ bdRead bool
+ br bool // bytes reader
+ noBuiltInTypes
+ noStreamingCodec
+ decNoSeparator
+}
+
+// Note: This returns either a primitive (int, bool, etc) for non-containers,
+// or a containerType, or a specific type denoting nil or extension.
+// It is called when a nil interface{} is passed, leaving it up to the DecDriver
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ bd := d.bd
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch bd {
+ case mpNil:
+ n.v = valueTypeNil
+ d.bdRead = false
+ case mpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case mpTrue:
+ n.v = valueTypeBool
+ n.b = true
+
+ case mpFloat:
+ n.v = valueTypeFloat
+ n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ case mpDouble:
+ n.v = valueTypeFloat
+ n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+
+ case mpUint8:
+ n.v = valueTypeUint
+ n.u = uint64(d.r.readn1())
+ case mpUint16:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint64(d.r.readx(8)))
+
+ case mpInt8:
+ n.v = valueTypeInt
+ n.i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ n.v = valueTypeInt
+ n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ n.v = valueTypeInt
+ n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ n.v = valueTypeInt
+ n.i = int64(int64(bigen.Uint64(d.r.readx(8))))
+
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ // positive fixnum (always signed)
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ // negative fixnum
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ if d.h.RawToString {
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ } else {
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ }
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ n.v = valueTypeExt
+ clen := d.readExtLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(clen)
+ default:
+ d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+ }
+ }
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+// int can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ i = int64(uint64(d.r.readn1()))
+ case mpUint16:
+ i = int64(uint64(bigen.Uint16(d.r.readx(2))))
+ case mpUint32:
+ i = int64(uint64(bigen.Uint32(d.r.readx(4))))
+ case mpUint64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ case mpInt8:
+ i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ i = int64(int8(d.bd))
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ i = int64(int8(d.bd))
+ default:
+ d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ }
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowInt())
+ if bitsize > 0 {
+ if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
+ d.d.errorf("Overflow int value: %v", i)
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// uint can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ ui = uint64(d.r.readn1())
+ case mpUint16:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ ui = bigen.Uint64(d.r.readx(8))
+ case mpInt8:
+ if i := int64(int8(d.r.readn1())); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt16:
+ if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt32:
+ if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt64:
+ if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ ui = uint64(d.bd)
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd))
+ return
+ default:
+ d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ }
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+ if bitsize > 0 {
+ if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
+ d.d.errorf("Overflow uint value: %v", ui)
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// float can either be decoded from msgpack type: float, double or intX
+func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFloat {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == mpDouble {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ f = float64(d.DecodeInt(0))
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("msgpack: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool, fixnum 0 or 1.
+func (d *msgpackDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFalse || d.bd == 0 {
+ // b = false
+ } else if d.bd == mpTrue || d.bd == 1 {
+ b = true
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ var clen int
+ // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin
+ if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
+ clen = d.readContainerLen(msgpackContainerBin)
+ } else {
+ clen = d.readContainerLen(msgpackContainerStr)
+ }
+ // println("DecodeBytes: clen: ", clen)
+ d.bdRead = false
+ // bytes may be nil, so handle it. if nil, clen=-1.
+ if clen < 0 {
+ return nil
+ }
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *msgpackDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *msgpackDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *msgpackDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *msgpackDecDriver) ContainerType() (vt valueType) {
+ bd := d.bd
+ if bd == mpNil {
+ return valueTypeNil
+ } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
+ (!d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
+ return valueTypeBytes
+ } else if d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
+ return valueTypeString
+ } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
+ return valueTypeArray
+ } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpNil {
+ d.bdRead = false
+ v = true
+ }
+ return
+}
+
+func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
+ bd := d.bd
+ if bd == mpNil {
+ clen = -1 // to represent nil
+ } else if bd == ct.b8 {
+ clen = int(d.r.readn1())
+ } else if bd == ct.b16 {
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ } else if bd == ct.b32 {
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ } else if (ct.bFixMin & bd) == ct.bFixMin {
+ clen = int(ct.bFixMin ^ bd)
+ } else {
+ d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
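+
+// For example: bd=0x93 is a fixarray of length 3. With ct=msgpackContainerList
+// (bFixMin=0x90), the fix-min branch fires since 0x90&0x93 == 0x90, and
+// clen = 0x90^0x93 = 3.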
+
+func (d *msgpackDecDriver) ReadMapStart() int {
+ return d.readContainerLen(msgpackContainerMap)
+}
+
+func (d *msgpackDecDriver) ReadArrayStart() int {
+ return d.readContainerLen(msgpackContainerList)
+}
+
+func (d *msgpackDecDriver) readExtLen() (clen int) {
+ switch d.bd {
+ case mpNil:
+ clen = -1 // to represent nil
+ case mpFixExt1:
+ clen = 1
+ case mpFixExt2:
+ clen = 2
+ case mpFixExt4:
+ clen = 4
+ case mpFixExt8:
+ clen = 8
+ case mpFixExt16:
+ clen = 16
+ case mpExt8:
+ clen = int(d.r.readn1())
+ case mpExt16:
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ case mpExt32:
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ default:
+ d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
+ return
+ }
+ return
+}
+
+func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ xbd := d.bd
+ if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
+ xbs = d.DecodeBytes(nil, false, true)
+ } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
+ (xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
+ xbs = d.DecodeBytes(nil, true, true)
+ } else {
+ clen := d.readExtLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(clen)
+ }
+ d.bdRead = false
+ return
+}
+
+//--------------------------------------------------
+
+// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
+type MsgpackHandle struct {
+ BasicHandle
+
+ // RawToString controls how raw bytes are decoded into a nil interface{}.
+ RawToString bool
+
+ // WriteExt flag supports encoding configured extensions with extension tags.
+ // It also controls whether other elements of the new spec are encoded (ie Str8).
+ //
+ // With WriteExt=false, configured extensions are serialized as raw bytes
+ // and Str8 is not encoded.
+ //
+ // A stream can still be decoded into a typed value, provided an appropriate value
+ // is provided, but the type cannot be inferred from the stream. If no appropriate
+ // type is provided (e.g. decoding into a nil interface{}), you get back
+ // a []byte or string based on the setting of RawToString.
+ WriteExt bool
+ binaryEncodingType
+}
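+
+// Usage sketch (assuming the NewEncoderBytes/NewDecoderBytes constructors
+// defined elsewhere in this package):
+//
+//	var (
+//		mh  MsgpackHandle
+//		out []byte
+//	)
+//	mh.RawToString = true // decode msgpack raw/str into string, not []byte
+//	mh.WriteExt = true    // emit new-spec types (bin, str8) and extensions
+//	if err := NewEncoderBytes(&out, &mh).Encode([]interface{}{1, "a"}); err != nil {
+//		// handle error
+//	}
+//	var v interface{}
+//	if err := NewDecoderBytes(out, &mh).Decode(&v); err != nil {
+//		// handle error
+//	}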
+
+func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
+ return &msgpackEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
+ return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *msgpackEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *msgpackDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+//--------------------------------------------------
+
+type msgpackSpecRpcCodec struct {
+ rpcCodec
+}
+
+// /////////////// Spec RPC Codec ///////////////////
+func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // WriteRequest can write to both a Go service, and other services that do
+ // not abide by the 1 argument rule of a Go service.
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
+ var bodyArr []interface{}
+ if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
+ bodyArr = ([]interface{})(m)
+ } else {
+ bodyArr = []interface{}{body}
+ }
+ r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ var moe interface{}
+ if r.Error != "" {
+ moe = r.Error
+ }
+ if moe != nil && body != nil {
+ body = nil
+ }
+ r2 := []interface{}{1, uint32(r.Seq), moe, body}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.parseCustomHeader(1, &r.Seq, &r.Error)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
+ if body == nil { // read and discard
+ return c.read(nil)
+ }
+ bodyArr := []interface{}{body}
+ return c.read(&bodyArr)
+}
+
+func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
+
+ if c.isClosed() {
+ return io.EOF
+ }
+
+ // We read the response header by hand
+ // so that the body can be decoded on its own from the stream at a later time.
+
+ const fia byte = 0x94 // four-item array descriptor value
+ // Not sure why the panic of EOF is swallowed above.
+ // if bs1 := c.dec.r.readn1(); bs1 != fia {
+ // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
+ // return
+ // }
+ var b byte
+ b, err = c.br.ReadByte()
+ if err != nil {
+ return
+ }
+ if b != fia {
+ err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
+ return
+ }
+
+ if err = c.read(&b); err != nil {
+ return
+ }
+ if b != expectTypeByte {
+ err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
+ return
+ }
+ if err = c.read(msgid); err != nil {
+ return
+ }
+ if err = c.read(methodOrError); err != nil {
+ return
+ }
+ return
+}
+
+//--------------------------------------------------
+
+// msgpackSpecRpc is the implementation of Rpc that uses the custom communication protocol
+// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+type msgpackSpecRpc struct{}
+
+// MsgpackSpecRpc implements Rpc using the communication protocol defined in
+// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
+var MsgpackSpecRpc msgpackSpecRpc
+
+func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
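+
+// Usage sketch with net/rpc (conn is any established io.ReadWriteCloser, e.g.
+// from net.Dial; args and reply are placeholders):
+//
+//	var mh MsgpackHandle
+//	// server side:
+//	go rpc.ServeCodec(MsgpackSpecRpc.ServerCodec(conn, &mh))
+//	// client side:
+//	client := rpc.NewClientWithCodec(MsgpackSpecRpc.ClientCodec(conn, &mh))
+//	err := client.Call("Service.Method", args, &reply)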
+
+var _ decDriver = (*msgpackDecDriver)(nil)
+var _ encDriver = (*msgpackEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go
new file mode 100644
index 000000000..cfee3d084
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/noop.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math/rand"
+ "time"
+)
+
+// NoopHandle returns a no-op handle. It basically does nothing.
+// It is only useful for benchmarking, as it gives an idea of the
+// overhead from the codec framework.
+//
+// LIBRARY USERS: *** DO NOT USE ***
+func NoopHandle(slen int) *noopHandle {
+ h := noopHandle{}
+ h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
+ h.B = make([][]byte, slen)
+ h.S = make([]string, slen)
+ for i := 0; i < len(h.S); i++ {
+ b := make([]byte, i+1)
+ for j := 0; j < len(b); j++ {
+ b[j] = 'a' + byte(i)
+ }
+ h.B[i] = b
+ h.S[i] = string(b)
+ }
+ return &h
+}
+
+// noopHandle does nothing.
+// It is used to simulate the overhead of the codec framework.
+type noopHandle struct {
+ BasicHandle
+ binaryEncodingType
+ noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
+}
+
+type noopDrv struct {
+ d *Decoder
+ e *Encoder
+ i int
+ S []string
+ B [][]byte
+ mks []bool // stack. if map (true), else if array (false)
+ mk bool // top of stack. what container are we on? map or array?
+ ct valueType // last response for ContainerType.
+ cb int // counter for ContainerType
+ rand *rand.Rand
+}
+
+func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
+func (h *noopDrv) m(v int) int { h.i++; return h.i % v }
+
+func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
+func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }
+
+func (h *noopDrv) reset() {}
+func (h *noopDrv) uncacheRead() {}
+
+// --- encDriver
+
+// stack functions (for map and array)
+func (h *noopDrv) start(b bool) {
+ // println("start", len(h.mks)+1)
+ h.mks = append(h.mks, b)
+ h.mk = b
+}
+func (h *noopDrv) end() {
+ // println("end: ", len(h.mks)-1)
+ h.mks = h.mks[:len(h.mks)-1]
+ if len(h.mks) > 0 {
+ h.mk = h.mks[len(h.mks)-1]
+ } else {
+ h.mk = false
+ }
+}
+
+func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (h *noopDrv) EncodeNil() {}
+func (h *noopDrv) EncodeInt(i int64) {}
+func (h *noopDrv) EncodeUint(i uint64) {}
+func (h *noopDrv) EncodeBool(b bool) {}
+func (h *noopDrv) EncodeFloat32(f float32) {}
+func (h *noopDrv) EncodeFloat64(f float64) {}
+func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {}
+func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
+func (h *noopDrv) EncodeMapStart(length int) { h.start(false) }
+func (h *noopDrv) EncodeEnd() { h.end() }
+
+func (h *noopDrv) EncodeString(c charEncoding, v string) {}
+func (h *noopDrv) EncodeSymbol(v string) {}
+func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}
+
+func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
+
+// ---- decDriver
+func (h *noopDrv) initReadNext() {}
+func (h *noopDrv) CheckBreak() bool { return false }
+func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false }
+func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) }
+func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) }
+func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
+func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 }
+func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] }
+
+// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) }
+
+func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
+
+func (h *noopDrv) ReadEnd() { h.end() }
+
+// toggle map/slice
+func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) }
+func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
+
+func (h *noopDrv) ContainerType() (vt valueType) {
+ // return h.m(2) == 0
+ // handle kStruct, which will bomb if it calls this and doesn't get back a map or array.
+ // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
+ // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
+ // however, every 10th time it is called, we just return something else.
+ var vals = [...]valueType{valueTypeArray, valueTypeMap}
+ // ------------ TAKE ------------
+ // if h.cb%2 == 0 {
+ // if h.ct == valueTypeMap || h.ct == valueTypeArray {
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // } else if h.cb%5 == 0 {
+ // h.ct = valueType(h.m(8))
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // ------------ TAKE ------------
+ // if h.cb%16 == 0 {
+ // h.ct = valueType(h.cb % 8)
+ // } else {
+ // h.ct = vals[h.cb%2]
+ // }
+ h.ct = vals[h.cb%2]
+ h.cb++
+ return h.ct
+
+ // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
+ // return h.ct
+ // }
+ // return valueTypeUnset
+ // TODO: may need to tweak this so it works.
+ // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
+ // h.cb = !h.cb
+ // h.ct = vt
+ // return h.cb
+ // }
+ // // go in a loop and check it.
+ // h.ct = vt
+ // h.cb = h.m(7) == 0
+ // return h.cb
+}
+func (h *noopDrv) TryDecodeAsNil() bool {
+ if h.mk {
+ return false
+ } else {
+ return h.m(8) == 0
+ }
+}
+func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
+ return 0
+}
+
+func (h *noopDrv) DecodeNaked() {
+ // use h.r (random) not h.m() because h.m() could cause the same value to be given.
+ var sk int
+ if h.mk {
+ // if mapkey, do not support values of nil OR bytes, array, map or rawext
+ sk = h.r(7) + 1
+ } else {
+ sk = h.r(12)
+ }
+ n := &h.d.n
+ switch sk {
+ case 0:
+ n.v = valueTypeNil
+ case 1:
+ n.v, n.b = valueTypeBool, false
+ case 2:
+ n.v, n.b = valueTypeBool, true
+ case 3:
+ n.v, n.i = valueTypeInt, h.DecodeInt(64)
+ case 4:
+ n.v, n.u = valueTypeUint, h.DecodeUint(64)
+ case 5:
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
+ case 6:
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
+ case 7:
+ n.v, n.s = valueTypeString, h.DecodeString()
+ case 8:
+ n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
+ case 9:
+ n.v = valueTypeArray
+ case 10:
+ n.v = valueTypeMap
+ default:
+ n.v = valueTypeExt
+ n.u = h.DecodeUint(64)
+ n.l = h.B[h.m(len(h.B))]
+ }
+ h.ct = n.v
+ return
+}
diff --git a/vendor/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/ugorji/go/codec/prebuild.go
new file mode 100644
index 000000000..2353263e8
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/prebuild.go
@@ -0,0 +1,3 @@
+package codec
+
+//go:generate bash prebuild.sh
diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go
new file mode 100644
index 000000000..8062bed31
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/rpc.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "io"
+ "net/rpc"
+ "sync"
+)
+
+// rpcEncodeTerminator allows a handle to specify a []byte terminator to send after each Encode.
+//
+// Some codecs like json need to put a space after each encoded value, to serve as a
+// delimiter for things like numbers (else json codec will continue reading till EOF).
+type rpcEncodeTerminator interface {
+ rpcEncodeTerminate() []byte
+}
+
+// Rpc provides a rpc Server or Client Codec for rpc communication.
+type Rpc interface {
+ ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+ ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
+// used by the rpc connection. It accommodates use-cases where the connection
+// should be used by rpc and non-rpc functions, e.g. streaming a file after
+// sending an rpc response.
+type RpcCodecBuffered interface {
+ BufferedReader() *bufio.Reader
+ BufferedWriter() *bufio.Writer
+}
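+
+// Usage sketch: after an rpc exchange, the same connection can carry non-rpc
+// traffic via the buffered reader/writer (a type assertion, since the codecs
+// returned by GoRpc/MsgpackSpecRpc implement this interface; conn and handle
+// are placeholders):
+//
+//	sc := GoRpc.ServerCodec(conn, handle)
+//	if bc, ok := sc.(RpcCodecBuffered); ok {
+//		bc.BufferedWriter().WriteString("raw bytes after the rpc response\n")
+//		bc.BufferedWriter().Flush()
+//	}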
+
+// -------------------------------------
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+ rwc io.ReadWriteCloser
+ dec *Decoder
+ enc *Encoder
+ bw *bufio.Writer
+ br *bufio.Reader
+ mu sync.Mutex
+ h Handle
+
+ cls bool
+ clsmu sync.RWMutex
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+ bw := bufio.NewWriter(conn)
+ br := bufio.NewReader(conn)
+ return rpcCodec{
+ rwc: conn,
+ bw: bw,
+ br: br,
+ enc: NewEncoder(bw, h),
+ dec: NewDecoder(br, h),
+ h: h,
+ }
+}
+
+func (c *rpcCodec) BufferedReader() *bufio.Reader {
+ return c.br
+}
+
+func (c *rpcCodec) BufferedWriter() *bufio.Writer {
+ return c.bw
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
+ if c.isClosed() {
+ return io.EOF
+ }
+ if err = c.enc.Encode(obj1); err != nil {
+ return
+ }
+ t, tOk := c.h.(rpcEncodeTerminator)
+ if tOk {
+ c.bw.Write(t.rpcEncodeTerminate())
+ }
+ if writeObj2 {
+ if err = c.enc.Encode(obj2); err != nil {
+ return
+ }
+ if tOk {
+ c.bw.Write(t.rpcEncodeTerminate())
+ }
+ }
+ if doFlush {
+ return c.bw.Flush()
+ }
+ return
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+ if c.isClosed() {
+ return io.EOF
+ }
+ // If nil is passed in, we still attempt to read the content and discard it.
+ if obj == nil {
+ var obj2 interface{}
+ return c.dec.Decode(&obj2)
+ }
+ return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) isClosed() bool {
+ c.clsmu.RLock()
+ x := c.cls
+ c.clsmu.RUnlock()
+ return x
+}
+
+func (c *rpcCodec) Close() error {
+ if c.isClosed() {
+ return io.EOF
+ }
+ c.clsmu.Lock()
+ c.cls = true
+ c.clsmu.Unlock()
+ return c.rwc.Close()
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+ rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // Must protect for concurrent access as per API
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// as defined in net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
+var GoRpc goRpc
+
+func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
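+
+// Usage sketch (conn and h are placeholders for an established connection and
+// any configured Handle, e.g. a *MsgpackHandle):
+//
+//	// server side:
+//	go rpc.ServeCodec(GoRpc.ServerCodec(conn, h))
+//	// client side:
+//	client := rpc.NewClientWithCodec(GoRpc.ClientCodec(conn, h))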
+
+var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go
new file mode 100644
index 000000000..fdc457571
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/simple.go
@@ -0,0 +1,526 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+)
+
+const (
+ _ uint8 = iota
+ simpleVdNil = 1
+ simpleVdFalse = 2
+ simpleVdTrue = 3
+ simpleVdFloat32 = 4
+ simpleVdFloat64 = 5
+
+ // each lasts for 4 (ie n, n+1, n+2, n+3)
+ simpleVdPosInt = 8
+ simpleVdNegInt = 12
+
+ // containers: each lasts for 8 (ie n, n+1, n+2, ... n+7), though only n..n+4 are used
+ simpleVdString = 216
+ simpleVdByteArray = 224
+ simpleVdArray = 232
+ simpleVdMap = 240
+ simpleVdExt = 248
+)
+
+type simpleEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ h *SimpleHandle
+ w encWriter
+ b [8]byte
+}
+
+func (e *simpleEncDriver) EncodeNil() {
+ e.w.writen1(simpleVdNil)
+}
+
+func (e *simpleEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(simpleVdTrue)
+ } else {
+ e.w.writen1(simpleVdFalse)
+ }
+}
+
+func (e *simpleEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(simpleVdFloat32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *simpleEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(simpleVdFloat64)
+ bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *simpleEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-v), simpleVdNegInt)
+ } else {
+ e.encUint(uint64(v), simpleVdPosInt)
+ }
+}
+
+func (e *simpleEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, simpleVdPosInt)
+}
+
+func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 1)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:8], e.w}.writeUint64(v)
+ }
+}
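+
+// Examples of the size selection above (sketch):
+//	encUint(5, simpleVdPosInt)     -> 0x08 0x05       (1-byte payload)
+//	encUint(500, simpleVdPosInt)   -> 0x09 0x01 0xf4  (bd+1, 2-byte payload)
+//	encUint(70000, simpleVdNegInt) -> 0x0e + 4 bytes  (bd+2; encodes -70000 via EncodeInt)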
+
+func (e *simpleEncDriver) encLen(bd byte, length int) {
+ if length == 0 {
+ e.w.writen1(bd)
+ } else if length <= math.MaxUint8 {
+ e.w.writen1(bd + 1)
+ e.w.writen1(uint8(length))
+ } else if length <= math.MaxUint16 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
+ } else if int64(length) <= math.MaxUint32 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
+ } else {
+ e.w.writen1(bd + 4)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
+ }
+}
+
+func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(simpleVdExt, length)
+ e.w.writen1(xtag)
+}
+
+func (e *simpleEncDriver) EncodeArrayStart(length int) {
+ e.encLen(simpleVdArray, length)
+}
+
+func (e *simpleEncDriver) EncodeMapStart(length int) {
+ e.encLen(simpleVdMap, length)
+}
+
+func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
+ e.encLen(simpleVdString, len(v))
+ e.w.writestr(v)
+}
+
+func (e *simpleEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ e.encLen(simpleVdByteArray, len(v))
+ e.w.writeb(v)
+}
+
+//------------------------------------
+
+type simpleDecDriver struct {
+ d *Decoder
+ h *SimpleHandle
+ r decReader
+ bdRead bool
+ bd byte
+ br bool // bytes reader
+ noBuiltInTypes
+ noStreamingCodec
+ decNoSeparator
+ b [scratchByteArrayLen]byte
+}
+
+func (d *simpleDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *simpleDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *simpleDecDriver) ContainerType() (vt valueType) {
+ if d.bd == simpleVdNil {
+ return valueTypeNil
+ } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
+ d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 {
+ return valueTypeBytes
+ } else if d.bd == simpleVdString || d.bd == simpleVdString+1 ||
+ d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 {
+ return valueTypeString
+ } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 ||
+ d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 {
+ return valueTypeArray
+ } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 ||
+ d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *simpleDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdPosInt:
+ ui = uint64(d.r.readn1())
+ case simpleVdPosInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case simpleVdPosInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case simpleVdPosInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ case simpleVdNegInt:
+ ui = uint64(d.r.readn1())
+ neg = true
+ case simpleVdNegInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ neg = true
+ case simpleVdNegInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ neg = true
+ case simpleVdNegInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ neg = true
+ default:
+		d.d.errorf("decCheckInteger: integer only valid from pos/neg integer descriptors (1, 2, 4 or 8 bytes). Invalid descriptor: %v", d.bd)
+ return
+ }
+ // don't do this check, because callers may only want the unsigned value.
+ // if ui > math.MaxInt64 {
+ // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
+ // return
+ // }
+ return
+}
+
+func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ ui, neg := d.decCheckInteger()
+ i, overflow := chkOvf.SignedInt(ui)
+ if overflow {
+ d.d.errorf("simple: overflow converting %v to signed integer", ui)
+ return
+ }
+ if neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("simple: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == simpleVdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
+ f = float64(d.DecodeInt(64))
+ } else {
+ d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
+ return
+ }
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+		d.d.errorf("simple: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *simpleDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdTrue {
+ b = true
+ } else if d.bd == simpleVdFalse {
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) ReadMapStart() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayStart() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) decLen() int {
+ switch d.bd % 8 {
+ case 0:
+ return 0
+ case 1:
+ return int(d.r.readn1())
+ case 2:
+ return int(bigen.Uint16(d.r.readx(2)))
+ case 3:
+ ui := uint64(bigen.Uint32(d.r.readx(4)))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ case 4:
+ ui := bigen.Uint64(d.r.readx(8))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ }
+ d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
+ return -1
+}
+
+func (d *simpleDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ xbs = d.DecodeBytes(nil, false, true)
+ default:
+ d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case simpleVdNil:
+ n.v = valueTypeNil
+ case simpleVdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case simpleVdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint(64)
+ }
+ case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ case simpleVdFloat32:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(true)
+ case simpleVdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(false)
+ case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+//------------------------------------
+
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+// - Encoding of a value is always preceded by the descriptor byte (bd)
+// - True, false, nil are encoded fully in 1 byte (the descriptor)
+// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+// - Length of containers (strings, bytes, array, map, extensions)
+//   is encoded in 0, 1, 2, 4 or 8 bytes.
+//   Zero-length containers have no length encoded.
+//   For others, the number of length bytes is pow(2, (bd%8)-1),
+//   i.e. bd%8 in 1..4 selects 1, 2, 4 or 8 length bytes (as read by decLen).
+// - maps are encoded as [bd] [length] [[key][value]]...
+// - arrays are encoded as [bd] [length] [value]...
+// - extensions are encoded as [bd] [length] [tag] [byte]...
+// - strings/bytearrays are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
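+//
+// A minimal round-trip sketch (assuming this package's NewEncoderBytes and
+// NewDecoderBytes constructors; error handling elided):
+//
+//	// for some encodable value v:
+//	var h SimpleHandle
+//	var b []byte
+//	_ = NewEncoderBytes(&b, &h).Encode(v) // encode v into b using the simple format
+//	_ = NewDecoderBytes(b, &h).Decode(&v) // decode b back into v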
+type SimpleHandle struct {
+ BasicHandle
+ binaryEncodingType
+}
+
+func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
+ return &simpleEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
+ return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *simpleEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *simpleDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/time.go b/vendor/github.com/ugorji/go/codec/time.go
new file mode 100644
index 000000000..718b731ec
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/time.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+var (
+ timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+ timeExtEncFn = func(rv reflect.Value) (bs []byte, err error) {
+ defer panicToErr(&err)
+ bs = timeExt{}.WriteExt(rv.Interface())
+ return
+ }
+ timeExtDecFn = func(rv reflect.Value, bs []byte) (err error) {
+ defer panicToErr(&err)
+ timeExt{}.ReadExt(rv.Interface(), bs)
+ return
+ }
+)
+
+type timeExt struct{}
+
+func (x timeExt) WriteExt(v interface{}) (bs []byte) {
+ switch v2 := v.(type) {
+ case time.Time:
+ bs = encodeTime(v2)
+ case *time.Time:
+ bs = encodeTime(*v2)
+ default:
+ panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
+ }
+ return
+}
+func (x timeExt) ReadExt(v interface{}, bs []byte) {
+ tt, err := decodeTime(bs)
+ if err != nil {
+ panic(err)
+ }
+ *(v.(*time.Time)) = tt
+}
+
+func (x timeExt) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+func (x timeExt) UpdateExt(v interface{}, src interface{}) {
+ x.ReadExt(v, src.([]byte))
+}
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+// and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+// A: Is secs component encoded? 1 = true
+// B: Is nsecs component encoded? 1 = true
+// C: Is tz component encoded? 1 = true
+// DDD: Number of extra bytes for secs (range 0-7).
+// If A = 1, secs encoded in DDD+1 bytes.
+// If A = 0, secs is not encoded, and is assumed to be 0.
+// If A = 1, then we need at least 1 byte to encode secs.
+// DDD says the number of extra bytes beyond that 1.
+// E.g. if DDD=0, then secs is represented in 1 byte.
+// if DDD=2, then secs is represented in 3 bytes.
+// EE: Number of extra bytes for nsecs (range 0-3).
+// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+// secs component encoded in `DDD + 1` bytes (if A == 1)
+// nsecs component encoded in `EE + 1` bytes (if B == 1)
+// tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in big-endian
+// two's-complement format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// Least significant bit 0 are described below:
+//
+// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+// Bit 15 = have_dst: set to 1 if we set the dst flag.
+// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
+//
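+// For example (an illustrative case, derived from the rules above): a UTC
+// time with secs=1 and nsecs=0 encodes as the two bytes 0x80 0x01. The
+// descriptor sets A=1 with DDD=0 (one secs byte), leaves B and C unset,
+// and the single secs byte holds the value 1.
+//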
+func encodeTime(t time.Time) []byte {
+ //t := rv.Interface().(time.Time)
+ tsecs, tnsecs := t.Unix(), t.Nanosecond()
+ var (
+ bd byte
+ btmp [8]byte
+ bs [16]byte
+ i int = 1
+ )
+ l := t.Location()
+ if l == time.UTC {
+ l = nil
+ }
+ if tsecs != 0 {
+ bd = bd | 0x80
+ bigen.PutUint64(btmp[:], uint64(tsecs))
+ f := pruneSignExt(btmp[:], tsecs >= 0)
+ bd = bd | (byte(7-f) << 2)
+ copy(bs[i:], btmp[f:])
+ i = i + (8 - f)
+ }
+ if tnsecs != 0 {
+ bd = bd | 0x40
+ bigen.PutUint32(btmp[:4], uint32(tnsecs))
+ f := pruneSignExt(btmp[:4], true)
+ bd = bd | byte(3-f)
+ copy(bs[i:], btmp[f:4])
+ i = i + (4 - f)
+ }
+ if l != nil {
+ bd = bd | 0x20
+		// Note that the Go standard library does not expose the dst flag.
+ _, zoneOffset := t.Zone()
+ //zoneName, zoneOffset := t.Zone()
+ zoneOffset /= 60
+ z := uint16(zoneOffset)
+ bigen.PutUint16(btmp[:2], z)
+ // clear dst flags
+ bs[i] = btmp[0] & 0x3f
+ bs[i+1] = btmp[1]
+ i = i + 2
+ }
+ bs[0] = bd
+ return bs[0:i]
+}
+
+// DecodeTime decodes a []byte into a time.Time.
+func decodeTime(bs []byte) (tt time.Time, err error) {
+ bd := bs[0]
+ var (
+ tsec int64
+ tnsec uint32
+ tz uint16
+ i byte = 1
+ i2 byte
+ n byte
+ )
+ if bd&(1<<7) != 0 {
+ var btmp [8]byte
+ n = ((bd >> 2) & 0x7) + 1
+ i2 = i + n
+ copy(btmp[8-n:], bs[i:i2])
+ //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+ if bs[i]&(1<<7) != 0 {
+ copy(btmp[0:8-n], bsAll0xff)
+ //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
+ }
+ i = i2
+ tsec = int64(bigen.Uint64(btmp[:]))
+ }
+ if bd&(1<<6) != 0 {
+ var btmp [4]byte
+ n = (bd & 0x3) + 1
+ i2 = i + n
+ copy(btmp[4-n:], bs[i:i2])
+ i = i2
+ tnsec = bigen.Uint32(btmp[:])
+ }
+ if bd&(1<<5) == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ return
+ }
+ // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+ // However, we need name here, so it can be shown when time is printed.
+ // Zone name is in form: UTC-08:00.
+	// Note that the Go standard library does not expose the dst flag, so we ignore the dst bits.
+
+ i2 = i + 2
+ tz = bigen.Uint16(bs[i:i2])
+ i = i2
+ // sign extend sign bit into top 2 MSB (which were dst bits):
+ if tz&(1<<13) == 0 { // positive
+ tz = tz & 0x3fff //clear 2 MSBs: dst bits
+ } else { // negative
+ tz = tz | 0xc000 //set 2 MSBs: dst bits
+ //tzname[3] = '-' (TODO: verify. this works here)
+ }
+ tzint := int16(tz)
+ if tzint == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ } else {
+ // For Go Time, do not use a descriptive timezone.
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+ // The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+ // var zoneName = timeLocUTCName(tzint)
+ tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+ }
+ return
+}
+
+func timeLocUTCName(tzint int16) string {
+ if tzint == 0 {
+ return "UTC"
+ }
+ var tzname = []byte("UTC+00:00")
+ //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+ //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+ var tzhr, tzmin int16
+ if tzint < 0 {
+ tzname[3] = '-' // (TODO: verify. this works here)
+ tzhr, tzmin = -tzint/60, (-tzint)%60
+ } else {
+ tzhr, tzmin = tzint/60, tzint%60
+ }
+ tzname[4] = timeDigits[tzhr/10]
+ tzname[5] = timeDigits[tzhr%10]
+ tzname[7] = timeDigits[tzmin/10]
+ tzname[8] = timeDigits[tzmin%10]
+ return string(tzname)
+ //return time.FixedZone(string(tzname), int(tzint)*60)
+}
diff --git a/vendor/github.com/ulule/deepcopier/LICENSE b/vendor/github.com/ulule/deepcopier/LICENSE
new file mode 100644
index 000000000..d5c4ea02c
--- /dev/null
+++ b/vendor/github.com/ulule/deepcopier/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Ulule
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/ulule/deepcopier/README.md b/vendor/github.com/ulule/deepcopier/README.md
new file mode 100644
index 000000000..25aafae8a
--- /dev/null
+++ b/vendor/github.com/ulule/deepcopier/README.md
@@ -0,0 +1,129 @@
+# Deepcopier
+
+[![Build Status](https://secure.travis-ci.org/ulule/deepcopier.png?branch=master)](http://travis-ci.org/ulule/deepcopier)
+
+This package is meant to make copying structs to/from other structs a bit easier.
+
+## Installation
+
+```bash
+go get -u github.com/ulule/deepcopier
+```
+
+## Usage
+
+```golang
+// Deep copy instance1 into instance2
+Copy(instance1).To(instance2)
+
+// Deep copy instance1 into instance2, passing the given context (which
+// is basically a map[string]interface{}) as the first argument
+// to methods of instance2 that define the struct tag "context".
+Copy(instance1).WithContext(map[string]interface{}{"foo": "bar"}).To(instance2)
+
+// Deep copy instance2 into instance1
+Copy(instance1).From(instance2)
+
+// Deep copy instance2 into instance1, passing the given context (which
+// is basically a map[string]interface{}) as the first argument
+// to methods of instance1 that define the struct tag "context".
+Copy(instance1).WithContext(map[string]interface{}{"foo": "bar"}).From(instance2)
+```
+
+Available options for `deepcopier` struct tag:
+
+| Option | Description |
+| --------- | -------------------------------------------------------------------- |
+| `field` | Field or method name in source instance |
+| `skip` | Ignores the field |
+| `context` | Takes a `map[string]interface{}` as first argument (for methods) |
+| `force` | Set the value of a `sql.Null*` field (instead of copying the struct) |
+
+**Options example:**
+
+```golang
+type Source struct {
+ Name string
+ SkipMe string
+ SQLNullStringToSQLNullString sql.NullString
+ SQLNullStringToString sql.NullString
+}
+
+func (Source) MethodThatTakesContext(c map[string]interface{}) string {
+ return "whatever"
+}
+
+type Destination struct {
+ FieldWithAnotherNameInSource string `deepcopier:"field:Name"`
+ SkipMe string `deepcopier:"skip"`
+ MethodThatTakesContext string `deepcopier:"context"`
+ SQLNullStringToSQLNullString sql.NullString
+ SQLNullStringToString string `deepcopier:"force"`
+}
+```
+
+Example:
+
+```golang
+package main
+
+import (
+	"database/sql"
+	"fmt"
+
+	"github.com/ulule/deepcopier"
+)
+
+// Model
+type User struct {
+ // Basic string field
+ Name string
+ // Deepcopier supports https://golang.org/pkg/database/sql/driver/#Valuer
+ Email sql.NullString
+}
+
+func (u *User) MethodThatTakesContext(ctx map[string]interface{}) string {
+ // do whatever you want
+ return "hello from this method"
+}
+
+// Resource
+type UserResource struct {
+ DisplayName string `deepcopier:"field:Name"`
+ SkipMe string `deepcopier:"skip"`
+ MethodThatTakesContext string `deepcopier:"context"`
+ Email string `deepcopier:"force"`
+}
+
+func main() {
+ user := &User{
+ Name: "gilles",
+ Email: sql.NullString{
+ Valid: true,
+ String: "gilles@example.com",
+ },
+ }
+
+ resource := &UserResource{}
+
+ deepcopier.Copy(user).To(resource)
+
+ fmt.Println(resource.DisplayName)
+ fmt.Println(resource.Email)
+}
+```
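+
+Given the struct tags above, this example is expected to print:
+
+```
+gilles
+gilles@example.com
+```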
+
+Looking for more information about the usage?
+
+We wrote [an introduction article](https://github.com/ulule/deepcopier/blob/master/examples/rest-usage/README.rst).
+Have a look and feel free to give us your feedback.
+
+## Contributing
+
+* Ping us on twitter [@oibafsellig](https://twitter.com/oibafsellig), [@thoas](https://twitter.com/thoas)
+* Fork the [project](https://github.com/ulule/deepcopier)
+* Help us improving and fixing [issues](https://github.com/ulule/deepcopier/issues)
+
+Don't hesitate ;)
diff --git a/vendor/github.com/ulule/deepcopier/deepcopier.go b/vendor/github.com/ulule/deepcopier/deepcopier.go
new file mode 100644
index 000000000..8cccb5b78
--- /dev/null
+++ b/vendor/github.com/ulule/deepcopier/deepcopier.go
@@ -0,0 +1,350 @@
+package deepcopier
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+const (
+ // TagName is the deepcopier struct tag name.
+ TagName = "deepcopier"
+ // FieldOptionName is the from field option name for struct tag.
+ FieldOptionName = "field"
+ // ContextOptionName is the context option name for struct tag.
+ ContextOptionName = "context"
+ // SkipOptionName is the skip option name for struct tag.
+ SkipOptionName = "skip"
+	// ForceOptionName is the force option name for struct tag.
+ ForceOptionName = "force"
+)
+
+type (
+ // TagOptions is a map that contains extracted struct tag options.
+ TagOptions map[string]string
+
+ // Options are copier options.
+ Options struct {
+ // Context given to WithContext() method.
+ Context map[string]interface{}
+		// Reversed reverses struct tag checks.
+ Reversed bool
+ }
+)
+
+// DeepCopier deep copies a struct to/from a struct.
+type DeepCopier struct {
+ dst interface{}
+ src interface{}
+ ctx map[string]interface{}
+}
+
+// Copy sets source or destination.
+func Copy(src interface{}) *DeepCopier {
+ return &DeepCopier{src: src}
+}
+
+// WithContext injects the given context into the builder instance.
+func (dc *DeepCopier) WithContext(ctx map[string]interface{}) *DeepCopier {
+ dc.ctx = ctx
+ return dc
+}
+
+// To sets the destination.
+func (dc *DeepCopier) To(dst interface{}) error {
+ dc.dst = dst
+ return process(dc.dst, dc.src, Options{Context: dc.ctx})
+}
+
+// From sets the given value as the source and the value passed to Copy as the destination.
+func (dc *DeepCopier) From(src interface{}) error {
+ dc.dst = dc.src
+ dc.src = src
+ return process(dc.dst, dc.src, Options{Context: dc.ctx, Reversed: true})
+}
+
+// process handles copy.
+func process(dst interface{}, src interface{}, args ...Options) error {
+ var (
+ options = Options{}
+ srcValue = reflect.Indirect(reflect.ValueOf(src))
+ dstValue = reflect.Indirect(reflect.ValueOf(dst))
+ srcFieldNames = getFieldNames(src)
+ srcMethodNames = getMethodNames(src)
+ )
+
+ if len(args) > 0 {
+ options = args[0]
+ }
+
+ if !dstValue.CanAddr() {
+ return fmt.Errorf("destination %+v is unaddressable", dstValue.Interface())
+ }
+
+ for _, f := range srcFieldNames {
+ var (
+ srcFieldValue = srcValue.FieldByName(f)
+ srcFieldType, srcFieldFound = srcValue.Type().FieldByName(f)
+ srcFieldName = srcFieldType.Name
+ dstFieldName = srcFieldName
+ tagOptions TagOptions
+ )
+
+ if !srcFieldFound {
+ continue
+ }
+
+ if options.Reversed {
+ tagOptions = getTagOptions(srcFieldType.Tag.Get(TagName))
+ if v, ok := tagOptions[FieldOptionName]; ok && v != "" {
+ dstFieldName = v
+ }
+ } else {
+ if name, opts := getRelatedField(dst, srcFieldName); name != "" {
+ dstFieldName, tagOptions = name, opts
+ }
+ }
+
+ if _, ok := tagOptions[SkipOptionName]; ok {
+ continue
+ }
+
+ var (
+ dstFieldType, dstFieldFound = dstValue.Type().FieldByName(dstFieldName)
+ dstFieldValue = dstValue.FieldByName(dstFieldName)
+ )
+
+ if !dstFieldFound {
+ continue
+ }
+
+ // Force option for empty interfaces and nullable types
+ _, force := tagOptions[ForceOptionName]
+
+ // Valuer -> ptr
+ if isNullableType(srcFieldType.Type) && dstFieldValue.Kind() == reflect.Ptr && force {
+ v, _ := srcFieldValue.Interface().(driver.Valuer).Value()
+ if v == nil {
+ continue
+ }
+
+ valueType := reflect.TypeOf(v)
+
+ ptr := reflect.New(valueType)
+ ptr.Elem().Set(reflect.ValueOf(v))
+
+ if valueType.AssignableTo(dstFieldType.Type.Elem()) {
+ dstFieldValue.Set(ptr)
+ }
+
+ continue
+ }
+
+ // Valuer -> value
+ if isNullableType(srcFieldType.Type) {
+ if force {
+ v, _ := srcFieldValue.Interface().(driver.Valuer).Value()
+ if v == nil {
+ continue
+ }
+
+ rv := reflect.ValueOf(v)
+ if rv.Type().AssignableTo(dstFieldType.Type) {
+ dstFieldValue.Set(rv)
+ }
+ }
+
+ continue
+ }
+
+ if dstFieldValue.Kind() == reflect.Interface {
+ if force {
+ dstFieldValue.Set(srcFieldValue)
+ }
+ continue
+ }
+
+ // Ptr -> Value
+ if srcFieldType.Type.Kind() == reflect.Ptr && !srcFieldValue.IsNil() && dstFieldType.Type.Kind() != reflect.Ptr {
+ indirect := reflect.Indirect(srcFieldValue)
+
+ if indirect.Type().AssignableTo(dstFieldType.Type) {
+ dstFieldValue.Set(indirect)
+ continue
+ }
+ }
+
+ // Other types
+ if srcFieldType.Type.AssignableTo(dstFieldType.Type) {
+ dstFieldValue.Set(srcFieldValue)
+ }
+ }
+
+ for _, m := range srcMethodNames {
+ name, opts := getRelatedField(dst, m)
+ if name == "" {
+ continue
+ }
+
+ if _, ok := opts[SkipOptionName]; ok {
+ continue
+ }
+
+ method := reflect.ValueOf(src).MethodByName(m)
+ if !method.IsValid() {
+ return fmt.Errorf("method %s is invalid", m)
+ }
+
+ var (
+ dstFieldType, _ = dstValue.Type().FieldByName(name)
+ dstFieldValue = dstValue.FieldByName(name)
+ _, withContext = opts[ContextOptionName]
+ _, force = opts[ForceOptionName]
+ )
+
+ args := []reflect.Value{}
+ if withContext {
+ args = []reflect.Value{reflect.ValueOf(options.Context)}
+ }
+
+ var (
+ result = method.Call(args)[0]
+ resultInterface = result.Interface()
+ resultValue = reflect.ValueOf(resultInterface)
+ resultType = resultValue.Type()
+ )
+
+ // Value -> Ptr
+ if dstFieldValue.Kind() == reflect.Ptr && force {
+ ptr := reflect.New(resultType)
+ ptr.Elem().Set(resultValue)
+
+ if ptr.Type().AssignableTo(dstFieldType.Type) {
+ dstFieldValue.Set(ptr)
+ }
+
+ continue
+ }
+
+ // Ptr -> value
+ if resultValue.Kind() == reflect.Ptr && force {
+ if resultValue.Elem().Type().AssignableTo(dstFieldType.Type) {
+ dstFieldValue.Set(resultValue.Elem())
+ }
+
+ continue
+ }
+
+ if resultType.AssignableTo(dstFieldType.Type) && result.IsValid() {
+ dstFieldValue.Set(result)
+ }
+ }
+
+ return nil
+}
+
+// getTagOptions parses deepcopier tag field and returns options.
+func getTagOptions(value string) TagOptions {
+ options := TagOptions{}
+
+ for _, opt := range strings.Split(value, ";") {
+ o := strings.Split(opt, ":")
+
+ // deepcopier:"keyword; without; value;"
+ if len(o) == 1 {
+ options[o[0]] = ""
+ }
+
+ // deepcopier:"key:value; anotherkey:anothervalue"
+ if len(o) == 2 {
+ options[strings.TrimSpace(o[0])] = strings.TrimSpace(o[1])
+ }
+ }
+
+ return options
+}
+
+// getRelatedField returns first matching field.
+func getRelatedField(instance interface{}, name string) (string, TagOptions) {
+ var (
+ value = reflect.Indirect(reflect.ValueOf(instance))
+ fieldName string
+ tagOptions TagOptions
+ )
+
+ for i := 0; i < value.NumField(); i++ {
+ var (
+ vField = value.Field(i)
+ tField = value.Type().Field(i)
+ tagOptions = getTagOptions(tField.Tag.Get(TagName))
+ )
+
+ if tField.Type.Kind() == reflect.Struct && tField.Anonymous {
+ if n, o := getRelatedField(vField.Interface(), name); n != "" {
+ return n, o
+ }
+ }
+
+ if v, ok := tagOptions[FieldOptionName]; ok && v == name {
+ return tField.Name, tagOptions
+ }
+
+ if tField.Name == name {
+ return tField.Name, tagOptions
+ }
+ }
+
+ return fieldName, tagOptions
+}
+
+// getMethodNames returns instance's method names.
+func getMethodNames(instance interface{}) []string {
+ var methods []string
+
+ t := reflect.TypeOf(instance)
+ for i := 0; i < t.NumMethod(); i++ {
+ methods = append(methods, t.Method(i).Name)
+ }
+
+ return methods
+}
+
+// getFieldNames returns instance's field names.
+func getFieldNames(instance interface{}) []string {
+ var (
+ fields []string
+ v = reflect.Indirect(reflect.ValueOf(instance))
+ t = v.Type()
+ )
+
+ if t.Kind() != reflect.Struct {
+ return nil
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ var (
+ vField = v.Field(i)
+ tField = v.Type().Field(i)
+ )
+
+		// Skip unexported fields (PkgPath is non-empty for unexported fields).
+ if tField.PkgPath != "" {
+ continue
+ }
+
+ if tField.Type.Kind() == reflect.Struct && tField.Anonymous {
+ fields = append(fields, getFieldNames(vField.Interface())...)
+ continue
+ }
+
+ fields = append(fields, tField.Name)
+ }
+
+ return fields
+}
+
+// isNullableType returns true if the given type is a nullable one.
+func isNullableType(t reflect.Type) bool {
+ return t.ConvertibleTo(reflect.TypeOf((*driver.Valuer)(nil)).Elem())
+}
diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE
new file mode 100644
index 000000000..42a597e29
--- /dev/null
+++ b/vendor/github.com/urfave/cli/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Jeremy Saenz & Contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md
new file mode 100644
index 000000000..2bbbd8ea9
--- /dev/null
+++ b/vendor/github.com/urfave/cli/README.md
@@ -0,0 +1,1381 @@
+cli
+===
+
+[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli)
+[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli)
+[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli)
+[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli)
+[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) /
+[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc)
+
+**Notice:** This is the library formerly known as
+`github.com/codegangsta/cli` -- GitHub will automatically redirect requests
+to this repository, but we recommend updating your references for clarity.
+
+cli is a simple, fast, and fun package for building command line apps in Go. The
+goal is to enable developers to write fast and distributable command line
+applications in an expressive way.
+
+<!-- toc -->
+
+- [Overview](#overview)
+- [Installation](#installation)
+ * [Supported platforms](#supported-platforms)
+ * [Using the `v2` branch](#using-the-v2-branch)
+ * [Pinning to the `v1` releases](#pinning-to-the-v1-releases)
+- [Getting Started](#getting-started)
+- [Examples](#examples)
+ * [Arguments](#arguments)
+ * [Flags](#flags)
+ + [Placeholder Values](#placeholder-values)
+ + [Alternate Names](#alternate-names)
+ + [Ordering](#ordering)
+ + [Values from the Environment](#values-from-the-environment)
+ + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others)
+ * [Subcommands](#subcommands)
+ * [Subcommands categories](#subcommands-categories)
+ * [Exit code](#exit-code)
+ * [Bash Completion](#bash-completion)
+ + [Enabling](#enabling)
+ + [Distribution](#distribution)
+ + [Customization](#customization)
+ * [Generated Help Text](#generated-help-text)
+ + [Customization](#customization-1)
+ * [Version Flag](#version-flag)
+ + [Customization](#customization-2)
+ + [Full API Example](#full-api-example)
+- [Contribution Guidelines](#contribution-guidelines)
+
+<!-- tocstop -->
+
+## Overview
+
+Command line apps are usually so tiny that there is absolutely no reason why
+your code should *not* be self-documenting. Things like generating help text and
+parsing command flags/options should not hinder productivity when writing a
+command line app.
+
+**This is where cli comes into play.** cli makes command line programming fun,
+organized, and expressive!
+
+## Installation
+
+Make sure you have a working Go environment. Go version 1.2+ is supported. [See
+the install instructions for Go](http://golang.org/doc/install.html).
+
+To install cli, simply run:
+```
+$ go get github.com/urfave/cli
+```
+
+Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can
+be easily used:
+```
+export PATH=$PATH:$GOPATH/bin
+```
+
+### Supported platforms
+
+cli is tested against multiple versions of Go on Linux, and against the latest
+released version of Go on OS X and Windows. For full details, see
+[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml).
+
+### Using the `v2` branch
+
+**Warning**: The `v2` branch is currently unreleased and considered unstable.
+
+There is currently a long-lived branch named `v2` that is intended to land as
+the new `master` branch once development there has settled down. The current
+`master` branch (mirrored as `v1`) is being manually merged into `v2` on
+an irregular human-based schedule, but generally if one wants to "upgrade" to
+`v2` *now* and accept the volatility (read: "awesomeness") that comes along with
+that, please use whatever version pinning of your preference, such as via
+`gopkg.in`:
+
+```
+$ go get gopkg.in/urfave/cli.v2
+```
+
+``` go
+...
+import (
+ "gopkg.in/urfave/cli.v2" // imports as package "cli"
+)
+...
+```
+
+### Pinning to the `v1` releases
+
+Similarly to the section above describing use of the `v2` branch, if one wants
+to avoid any unexpected compatibility pains once `v2` becomes `master`, then
+pinning to `v1` is an acceptable option, e.g.:
+
+```
+$ go get gopkg.in/urfave/cli.v1
+```
+
+``` go
+...
+import (
+ "gopkg.in/urfave/cli.v1" // imports as package "cli"
+)
+...
+```
+
+This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing).
+
+## Getting Started
+
+One of the philosophies behind cli is that an API should be playful and full of
+discovery. So a cli app can be as little as one line of code in `main()`.
+
+<!-- {
+ "args": ["&#45;&#45;help"],
+ "output": "A new cli application"
+} -->
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ cli.NewApp().Run(os.Args)
+}
+```
+
+This app will run and show help text, but is not very useful. Let's give an
+action to execute and some help documentation:
+
+<!-- {
+ "output": "boom! I say!"
+} -->
+``` go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+ app.Name = "boom"
+ app.Usage = "make an explosive entrance"
+ app.Action = func(c *cli.Context) error {
+ fmt.Println("boom! I say!")
+ return nil
+ }
+
+ app.Run(os.Args)
+}
+```
+
+Running this already gives you a ton of functionality, plus support for things
+like subcommands and flags, which are covered below.
+
+## Examples
+
+Being a programmer can be a lonely job. Thankfully by the power of automation
+that is not the case! Let's create a greeter app to fend off our demons of
+loneliness!
+
+Start by creating a directory named `greet`, and within it, add a file,
+`greet.go` with the following code in it:
+
+<!-- {
+ "output": "Hello friend!"
+} -->
+``` go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+ app.Name = "greet"
+ app.Usage = "fight the loneliness!"
+ app.Action = func(c *cli.Context) error {
+ fmt.Println("Hello friend!")
+ return nil
+ }
+
+ app.Run(os.Args)
+}
+```
+
+Install our command to the `$GOPATH/bin` directory:
+
+```
+$ go install
+```
+
+Finally run our new command:
+
+```
+$ greet
+Hello friend!
+```
+
+cli also generates neat help text:
+
+```
+$ greet help
+NAME:
+ greet - fight the loneliness!
+
+USAGE:
+ greet [global options] command [command options] [arguments...]
+
+VERSION:
+ 0.0.0
+
+COMMANDS:
+ help, h Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+ --version Shows version information
+```
+
+### Arguments
+
+You can look up arguments by calling the `Args` function on `cli.Context`, e.g.:
+
+<!-- {
+ "output": "Hello \""
+} -->
+``` go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Action = func(c *cli.Context) error {
+ fmt.Printf("Hello %q", c.Args().Get(0))
+ return nil
+ }
+
+ app.Run(os.Args)
+}
+```
+
+### Flags
+
+Setting and querying flags is simple.
+
+<!-- {
+ "output": "Hello Nefertiti"
+} -->
+``` go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Flags = []cli.Flag {
+ cli.StringFlag{
+ Name: "lang",
+ Value: "english",
+ Usage: "language for the greeting",
+ },
+ }
+
+ app.Action = func(c *cli.Context) error {
+ name := "Nefertiti"
+ if c.NArg() > 0 {
+ name = c.Args().Get(0)
+ }
+ if c.String("lang") == "spanish" {
+ fmt.Println("Hola", name)
+ } else {
+ fmt.Println("Hello", name)
+ }
+ return nil
+ }
+
+ app.Run(os.Args)
+}
+```
+
+You can also set a destination variable for a flag; the flag's value will be
+scanned into that variable.
+
+<!-- {
+ "output": "Hello someone"
+} -->
+``` go
+package main
+
+import (
+ "os"
+ "fmt"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ var language string
+
+ app := cli.NewApp()
+
+ app.Flags = []cli.Flag {
+ cli.StringFlag{
+ Name: "lang",
+ Value: "english",
+ Usage: "language for the greeting",
+ Destination: &language,
+ },
+ }
+
+ app.Action = func(c *cli.Context) error {
+ name := "someone"
+ if c.NArg() > 0 {
+ name = c.Args()[0]
+ }
+ if language == "spanish" {
+ fmt.Println("Hola", name)
+ } else {
+ fmt.Println("Hello", name)
+ }
+ return nil
+ }
+
+ app.Run(os.Args)
+}
+```
+
+See full list of flags at http://godoc.org/github.com/urfave/cli
+
+#### Placeholder Values
+
+Sometimes it's useful to specify a flag's value within the usage string itself.
+Such placeholders are indicated with back quotes.
+
+For example this:
+
+<!-- {
+ "args": ["&#45;&#45;help"],
+ "output": "&#45;&#45;config FILE, &#45;c FILE"
+} -->
+```go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Flags = []cli.Flag{
+ cli.StringFlag{
+ Name: "config, c",
+ Usage: "Load configuration from `FILE`",
+ },
+ }
+
+ app.Run(os.Args)
+}
+```
+
+Will result in help output like:
+
+```
+--config FILE, -c FILE Load configuration from FILE
+```
+
+Note that only the first placeholder is used. Subsequent back-quoted words will
+be left as-is.
+
+#### Alternate Names
+
+You can set alternate (or short) names for flags by providing a comma-delimited
+list for the `Name`. e.g.
+
+<!-- {
+ "args": ["&#45;&#45;help"],
+ "output": "&#45;&#45;lang value, &#45;l value.*language for the greeting.*default: \"english\""
+} -->
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Flags = []cli.Flag {
+ cli.StringFlag{
+ Name: "lang, l",
+ Value: "english",
+ Usage: "language for the greeting",
+ },
+ }
+
+ app.Run(os.Args)
+}
+```
+
+That flag can then be set with `--lang spanish` or `-l spanish`. Note that
+giving two different forms of the same flag in the same command invocation is an
+error.
+
+#### Ordering
+
+Flags for the application and commands are shown in the order they are defined.
+However, it's possible to sort them from outside this library by using `FlagsByName`
+or `CommandsByName` with `sort`.
+
+For example this:
+
+<!-- {
+ "args": ["&#45;&#45;help"],
+ "output": "add a task to the list\n.*complete a task on the list\n.*\n\n.*\n.*Load configuration from FILE\n.*Language for the greeting.*"
+} -->
+``` go
+package main
+
+import (
+ "os"
+ "sort"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Flags = []cli.Flag {
+ cli.StringFlag{
+ Name: "lang, l",
+ Value: "english",
+ Usage: "Language for the greeting",
+ },
+ cli.StringFlag{
+ Name: "config, c",
+ Usage: "Load configuration from `FILE`",
+ },
+ }
+
+ app.Commands = []cli.Command{
+ {
+ Name: "complete",
+ Aliases: []string{"c"},
+ Usage: "complete a task on the list",
+ Action: func(c *cli.Context) error {
+ return nil
+ },
+ },
+ {
+ Name: "add",
+ Aliases: []string{"a"},
+ Usage: "add a task to the list",
+ Action: func(c *cli.Context) error {
+ return nil
+ },
+ },
+ }
+
+ sort.Sort(cli.FlagsByName(app.Flags))
+ sort.Sort(cli.CommandsByName(app.Commands))
+
+ app.Run(os.Args)
+}
+```
+
+Will result in help output like:
+
+```
+--config FILE, -c FILE Load configuration from FILE
+--lang value, -l value Language for the greeting (default: "english")
+```
+
+#### Values from the Environment
+
+You can also have the default value set from the environment via `EnvVar`. e.g.
+
+<!-- {
+ "args": ["&#45;&#45;help"],
+ "output": "language for the greeting.*APP_LANG"
+} -->
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Flags = []cli.Flag {
+ cli.StringFlag{
+ Name: "lang, l",
+ Value: "english",
+ Usage: "language for the greeting",
+ EnvVar: "APP_LANG",
+ },
+ }
+
+ app.Run(os.Args)
+}
+```
+
+The `EnvVar` may also be given as a comma-delimited "cascade", where the first
+environment variable that resolves is used as the default.
+
+<!-- {
+ "args": ["&#45;&#45;help"],
+ "output": "language for the greeting.*LEGACY_COMPAT_LANG.*APP_LANG.*LANG"
+} -->
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Flags = []cli.Flag {
+ cli.StringFlag{
+ Name: "lang, l",
+ Value: "english",
+ Usage: "language for the greeting",
+ EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
+ },
+ }
+
+ app.Run(os.Args)
+}
+```
+
+#### Values from alternate input sources (YAML, TOML, and others)
+
+There is a separate package altsrc that adds support for getting flag values
+from other file input sources.
+
+Currently supported input source formats:
+* YAML
+* TOML
+
+In order to get values for a flag from an alternate input source the following
+code would be added to wrap an existing cli.Flag like below:
+
+``` go
+ altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
+```
+
+Initialization must also occur for these flags. Below is an example of
+initializing flags from a YAML file.
+
+``` go
+ command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
+```
+
+The code above will use the "load" string as a flag name to get the file name of
+a YAML file from the cli.Context. It will then use that file name to initialize
+the YAML input source for any flags that are defined on that command. Note that
+the "load" flag must also be defined on the command's flags in order for this
+code snippet to work.
+
+Currently only the formats specified above are supported, but developers can
+add support for other input sources by implementing the
+altsrc.InputSourceContext interface for their given sources.
+
+Here is a more complete sample of a command using YAML support:
+
+<!-- {
+ "args": ["test-cmd", "&#45;&#45;help"],
+  "output": "&#45;&#45;test value.*default: 0"
+} -->
+``` go
+package notmain
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+ "github.com/urfave/cli/altsrc"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ flags := []cli.Flag{
+ altsrc.NewIntFlag(cli.IntFlag{Name: "test"}),
+ cli.StringFlag{Name: "load"},
+ }
+
+ app.Action = func(c *cli.Context) error {
+ fmt.Println("yaml ist rad")
+ return nil
+ }
+
+ app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load"))
+ app.Flags = flags
+
+ app.Run(os.Args)
+}
+```
+
+### Subcommands
+
+Subcommands can be defined for a more git-like command line app.
+
+<!-- {
+ "args": ["template", "add"],
+ "output": "new task template: .+"
+} -->
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Commands = []cli.Command{
+ {
+ Name: "add",
+ Aliases: []string{"a"},
+ Usage: "add a task to the list",
+ Action: func(c *cli.Context) error {
+ fmt.Println("added task: ", c.Args().First())
+ return nil
+ },
+ },
+ {
+ Name: "complete",
+ Aliases: []string{"c"},
+ Usage: "complete a task on the list",
+ Action: func(c *cli.Context) error {
+ fmt.Println("completed task: ", c.Args().First())
+ return nil
+ },
+ },
+ {
+ Name: "template",
+ Aliases: []string{"t"},
+ Usage: "options for task templates",
+ Subcommands: []cli.Command{
+ {
+ Name: "add",
+ Usage: "add a new template",
+ Action: func(c *cli.Context) error {
+ fmt.Println("new task template: ", c.Args().First())
+ return nil
+ },
+ },
+ {
+ Name: "remove",
+ Usage: "remove an existing template",
+ Action: func(c *cli.Context) error {
+ fmt.Println("removed task template: ", c.Args().First())
+ return nil
+ },
+ },
+ },
+ },
+ }
+
+ app.Run(os.Args)
+}
+```
+
+### Subcommands categories
+
+For additional organization in apps that have many subcommands, you can
+associate a category for each command to group them together in the help
+output.
+
+E.g.
+
+```go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+
+ app.Commands = []cli.Command{
+ {
+ Name: "noop",
+ },
+ {
+ Name: "add",
+ Category: "template",
+ },
+ {
+ Name: "remove",
+ Category: "template",
+ },
+ }
+
+ app.Run(os.Args)
+}
+```
+
+Will include:
+
+```
+COMMANDS:
+ noop
+
+ Template actions:
+ add
+ remove
+```
+
+### Exit code
+
+Calling `App.Run` will not automatically call `os.Exit`, which means that by
+default the exit code will "fall through" to being `0`. An explicit exit code
+may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a
+`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.:
+
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ app := cli.NewApp()
+ app.Flags = []cli.Flag{
+ cli.BoolTFlag{
+ Name: "ginger-crouton",
+ Usage: "is it in the soup?",
+ },
+ }
+ app.Action = func(ctx *cli.Context) error {
+ if !ctx.Bool("ginger-crouton") {
+ return cli.NewExitError("it is not in the soup", 86)
+ }
+ return nil
+ }
+
+ app.Run(os.Args)
+}
+```
+
+### Bash Completion
+
+You can enable completion commands by setting the `EnableBashCompletion`
+flag on the `App` object. By default, this setting will only auto-complete to
+show an app's subcommands, but you can write your own completion methods for
+the App or its subcommands.
+
+<!-- {
+ "args": ["complete", "&#45;&#45;generate&#45;bash&#45;completion"],
+ "output": "laundry"
+} -->
+``` go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
+
+ app := cli.NewApp()
+ app.EnableBashCompletion = true
+ app.Commands = []cli.Command{
+ {
+ Name: "complete",
+ Aliases: []string{"c"},
+ Usage: "complete a task on the list",
+ Action: func(c *cli.Context) error {
+ fmt.Println("completed task: ", c.Args().First())
+ return nil
+ },
+ BashComplete: func(c *cli.Context) {
+ // This will complete if no args are passed
+ if c.NArg() > 0 {
+ return
+ }
+ for _, t := range tasks {
+ fmt.Println(t)
+ }
+ },
+ },
+ }
+
+ app.Run(os.Args)
+}
+```
+
+#### Enabling
+
+Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
+setting the `PROG` variable to the name of your program:
+
+`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
+
+#### Distribution
+
+Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
+it to the name of the program you wish to add autocomplete support for (or
+automatically install it there if you are distributing a package). Don't forget
+to source the file to make it active in the current shell.
+
+```
+sudo cp src/bash_autocomplete /etc/bash_completion.d/<myprogram>
+source /etc/bash_completion.d/<myprogram>
+```
+
+Alternatively, you can just document that users should source the generic
+`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
+to the name of their program (as above).
+
+#### Customization
+
+The default bash completion flag (`--generate-bash-completion`) is defined as
+`cli.BashCompletionFlag`, and may be redefined if desired, e.g.:
+
+<!-- {
+ "args": ["&#45;&#45;compgen"],
+ "output": "wat\nhelp\nh"
+} -->
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ cli.BashCompletionFlag = cli.BoolFlag{
+ Name: "compgen",
+ Hidden: true,
+ }
+
+ app := cli.NewApp()
+ app.EnableBashCompletion = true
+ app.Commands = []cli.Command{
+ {
+ Name: "wat",
+ },
+ }
+ app.Run(os.Args)
+}
+```
+
+### Generated Help Text
+
+The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked
+by the cli internals in order to print generated help text for the app, command,
+or subcommand, and break execution.
+
+#### Customization
+
+All of the help text generation may be customized, and at multiple levels. The
+templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and
+`SubcommandHelpTemplate` which may be reassigned or augmented, and full override
+is possible by assigning a compatible func to the `cli.HelpPrinter` variable,
+e.g.:
+
+<!-- {
+ "output": "Ha HA. I pwnd the help!!1"
+} -->
+``` go
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ // EXAMPLE: Append to an existing template
+ cli.AppHelpTemplate = fmt.Sprintf(`%s
+
+WEBSITE: http://awesometown.example.com
+
+SUPPORT: support@awesometown.example.com
+
+`, cli.AppHelpTemplate)
+
+ // EXAMPLE: Override a template
+ cli.AppHelpTemplate = `NAME:
+ {{.Name}} - {{.Usage}}
+USAGE:
+ {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
+ {{if len .Authors}}
+AUTHOR:
+ {{range .Authors}}{{ . }}{{end}}
+ {{end}}{{if .Commands}}
+COMMANDS:
+{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
+GLOBAL OPTIONS:
+ {{range .VisibleFlags}}{{.}}
+ {{end}}{{end}}{{if .Copyright }}
+COPYRIGHT:
+ {{.Copyright}}
+ {{end}}{{if .Version}}
+VERSION:
+ {{.Version}}
+ {{end}}
+`
+
+ // EXAMPLE: Replace the `HelpPrinter` func
+ cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
+ fmt.Println("Ha HA. I pwnd the help!!1")
+ }
+
+ cli.NewApp().Run(os.Args)
+}
+```
+
+The default flag may be customized to something other than `-h/--help` by
+setting `cli.HelpFlag`, e.g.:
+
+<!-- {
+  "args": ["&#45;&#45;halp"],
+ "output": "haaaaalp.*HALP"
+} -->
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ cli.HelpFlag = cli.BoolFlag{
+ Name: "halp, haaaaalp",
+ Usage: "HALP",
+ EnvVar: "SHOW_HALP,HALPPLZ",
+ }
+
+ cli.NewApp().Run(os.Args)
+}
+```
+
+### Version Flag
+
+The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which
+is checked by the cli internals in order to print the `App.Version` via
+`cli.VersionPrinter` and break execution.
+
+#### Customization
+
+The default flag may be customized to something other than `-v/--version` by
+setting `cli.VersionFlag`, e.g.:
+
+<!-- {
+  "args": ["&#45;&#45;print-version"],
+ "output": "partay version 19\\.99\\.0"
+} -->
+``` go
+package main
+
+import (
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+func main() {
+ cli.VersionFlag = cli.BoolFlag{
+ Name: "print-version, V",
+ Usage: "print only the version",
+ }
+
+ app := cli.NewApp()
+ app.Name = "partay"
+ app.Version = "19.99.0"
+ app.Run(os.Args)
+}
+```
+
+Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.:
+
+<!-- {
+  "args": ["&#45;&#45;version"],
+ "output": "version=19\\.99\\.0 revision=fafafaf"
+} -->
+``` go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/urfave/cli"
+)
+
+var (
+ Revision = "fafafaf"
+)
+
+func main() {
+ cli.VersionPrinter = func(c *cli.Context) {
+ fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision)
+ }
+
+ app := cli.NewApp()
+ app.Name = "partay"
+ app.Version = "19.99.0"
+ app.Run(os.Args)
+}
+```
+
+#### Full API Example
+
+**Notice**: This is a contrived (functioning) example meant strictly for API
+demonstration purposes. Use of one's imagination is encouraged.
+
+<!-- {
+ "output": "made it!\nPhew!"
+} -->
+``` go
+package main
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "time"
+
+ "github.com/urfave/cli"
+)
+
+func init() {
+ cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n"
+ cli.CommandHelpTemplate += "\nYMMV\n"
+ cli.SubcommandHelpTemplate += "\nor something\n"
+
+ cli.HelpFlag = cli.BoolFlag{Name: "halp"}
+ cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true}
+ cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"}
+
+ cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
+ fmt.Fprintf(w, "best of luck to you\n")
+ }
+ cli.VersionPrinter = func(c *cli.Context) {
+ fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version)
+ }
+ cli.OsExiter = func(c int) {
+ fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c)
+ }
+ cli.ErrWriter = ioutil.Discard
+ cli.FlagStringer = func(fl cli.Flag) string {
+ return fmt.Sprintf("\t\t%s", fl.GetName())
+ }
+}
+
+type hexWriter struct{}
+
+func (w *hexWriter) Write(p []byte) (int, error) {
+ for _, b := range p {
+ fmt.Printf("%x", b)
+ }
+ fmt.Printf("\n")
+
+ return len(p), nil
+}
+
+type genericType struct{
+ s string
+}
+
+func (g *genericType) Set(value string) error {
+ g.s = value
+ return nil
+}
+
+func (g *genericType) String() string {
+ return g.s
+}
+
+func main() {
+ app := cli.NewApp()
+ app.Name = "kənˈtrīv"
+ app.Version = "19.99.0"
+ app.Compiled = time.Now()
+ app.Authors = []cli.Author{
+ cli.Author{
+ Name: "Example Human",
+ Email: "human@example.com",
+ },
+ }
+ app.Copyright = "(c) 1999 Serious Enterprise"
+ app.HelpName = "contrive"
+ app.Usage = "demonstrate available API"
+ app.UsageText = "contrive - demonstrating the available API"
+ app.ArgsUsage = "[args and such]"
+ app.Commands = []cli.Command{
+ cli.Command{
+ Name: "doo",
+ Aliases: []string{"do"},
+ Category: "motion",
+ Usage: "do the doo",
+ UsageText: "doo - does the dooing",
+ Description: "no really, there is a lot of dooing to be done",
+ ArgsUsage: "[arrgh]",
+ Flags: []cli.Flag{
+ cli.BoolFlag{Name: "forever, forevvarr"},
+ },
+ Subcommands: cli.Commands{
+ cli.Command{
+ Name: "wop",
+ Action: wopAction,
+ },
+ },
+ SkipFlagParsing: false,
+ HideHelp: false,
+ Hidden: false,
+ HelpName: "doo!",
+ BashComplete: func(c *cli.Context) {
+ fmt.Fprintf(c.App.Writer, "--better\n")
+ },
+ Before: func(c *cli.Context) error {
+ fmt.Fprintf(c.App.Writer, "brace for impact\n")
+ return nil
+ },
+ After: func(c *cli.Context) error {
+ fmt.Fprintf(c.App.Writer, "did we lose anyone?\n")
+ return nil
+ },
+ Action: func(c *cli.Context) error {
+ c.Command.FullName()
+ c.Command.HasName("wop")
+ c.Command.Names()
+ c.Command.VisibleFlags()
+ fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n")
+ if c.Bool("forever") {
+ c.Command.Run(c)
+ }
+ return nil
+ },
+ OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error {
+ fmt.Fprintf(c.App.Writer, "for shame\n")
+ return err
+ },
+ },
+ }
+ app.Flags = []cli.Flag{
+ cli.BoolFlag{Name: "fancy"},
+ cli.BoolTFlag{Name: "fancier"},
+ cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3},
+ cli.Float64Flag{Name: "howmuch"},
+ cli.GenericFlag{Name: "wat", Value: &genericType{}},
+ cli.Int64Flag{Name: "longdistance"},
+ cli.Int64SliceFlag{Name: "intervals"},
+ cli.IntFlag{Name: "distance"},
+ cli.IntSliceFlag{Name: "times"},
+ cli.StringFlag{Name: "dance-move, d"},
+ cli.StringSliceFlag{Name: "names, N"},
+ cli.UintFlag{Name: "age"},
+ cli.Uint64Flag{Name: "bigage"},
+ }
+ app.EnableBashCompletion = true
+ app.HideHelp = false
+ app.HideVersion = false
+ app.BashComplete = func(c *cli.Context) {
+ fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n")
+ }
+ app.Before = func(c *cli.Context) error {
+ fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n")
+ return nil
+ }
+ app.After = func(c *cli.Context) error {
+ fmt.Fprintf(c.App.Writer, "Phew!\n")
+ return nil
+ }
+ app.CommandNotFound = func(c *cli.Context, command string) {
+ fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command)
+ }
+ app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error {
+ if isSubcommand {
+ return err
+ }
+
+ fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err)
+ return nil
+ }
+ app.Action = func(c *cli.Context) error {
+ cli.DefaultAppComplete(c)
+ cli.HandleExitCoder(errors.New("not an exit coder, though"))
+ cli.ShowAppHelp(c)
+ cli.ShowCommandCompletions(c, "nope")
+ cli.ShowCommandHelp(c, "also-nope")
+ cli.ShowCompletions(c)
+ cli.ShowSubcommandHelp(c)
+ cli.ShowVersion(c)
+
+ categories := c.App.Categories()
+ categories.AddCommand("sounds", cli.Command{
+ Name: "bloop",
+ })
+
+ for _, category := range c.App.Categories() {
+ fmt.Fprintf(c.App.Writer, "%s\n", category.Name)
+ fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands)
+ fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands())
+ }
+
+ fmt.Printf("%#v\n", c.App.Command("doo"))
+ if c.Bool("infinite") {
+ c.App.Run([]string{"app", "doo", "wop"})
+ }
+
+ if c.Bool("forevar") {
+ c.App.RunAsSubcommand(c)
+ }
+ c.App.Setup()
+ fmt.Printf("%#v\n", c.App.VisibleCategories())
+ fmt.Printf("%#v\n", c.App.VisibleCommands())
+ fmt.Printf("%#v\n", c.App.VisibleFlags())
+
+ fmt.Printf("%#v\n", c.Args().First())
+ if len(c.Args()) > 0 {
+ fmt.Printf("%#v\n", c.Args()[1])
+ }
+ fmt.Printf("%#v\n", c.Args().Present())
+ fmt.Printf("%#v\n", c.Args().Tail())
+
+ set := flag.NewFlagSet("contrive", 0)
+ nc := cli.NewContext(c.App, set, c)
+
+ fmt.Printf("%#v\n", nc.Args())
+ fmt.Printf("%#v\n", nc.Bool("nope"))
+ fmt.Printf("%#v\n", nc.BoolT("nerp"))
+ fmt.Printf("%#v\n", nc.Duration("howlong"))
+ fmt.Printf("%#v\n", nc.Float64("hay"))
+ fmt.Printf("%#v\n", nc.Generic("bloop"))
+ fmt.Printf("%#v\n", nc.Int64("bonk"))
+ fmt.Printf("%#v\n", nc.Int64Slice("burnks"))
+ fmt.Printf("%#v\n", nc.Int("bips"))
+ fmt.Printf("%#v\n", nc.IntSlice("blups"))
+ fmt.Printf("%#v\n", nc.String("snurt"))
+ fmt.Printf("%#v\n", nc.StringSlice("snurkles"))
+ fmt.Printf("%#v\n", nc.Uint("flub"))
+ fmt.Printf("%#v\n", nc.Uint64("florb"))
+ fmt.Printf("%#v\n", nc.GlobalBool("global-nope"))
+ fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp"))
+ fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong"))
+ fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay"))
+ fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop"))
+ fmt.Printf("%#v\n", nc.GlobalInt("global-bips"))
+ fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups"))
+ fmt.Printf("%#v\n", nc.GlobalString("global-snurt"))
+ fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles"))
+
+ fmt.Printf("%#v\n", nc.FlagNames())
+ fmt.Printf("%#v\n", nc.GlobalFlagNames())
+ fmt.Printf("%#v\n", nc.GlobalIsSet("wat"))
+ fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope"))
+ fmt.Printf("%#v\n", nc.NArg())
+ fmt.Printf("%#v\n", nc.NumFlags())
+ fmt.Printf("%#v\n", nc.Parent())
+
+ nc.Set("wat", "also-nope")
+
+ ec := cli.NewExitError("ohwell", 86)
+ fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode())
+ fmt.Printf("made it!\n")
+ return ec
+ }
+
+ if os.Getenv("HEXY") != "" {
+ app.Writer = &hexWriter{}
+ app.ErrWriter = &hexWriter{}
+ }
+
+ app.Metadata = map[string]interface{}{
+ "layers": "many",
+ "explicable": false,
+ "whatever-values": 19.99,
+ }
+
+ app.Run(os.Args)
+}
+
+func wopAction(c *cli.Context) error {
+ fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n")
+ return nil
+}
+```
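+
+As a side note, the `genericType` above is all it takes to satisfy the
+`cli.Generic` interface (`Set` and `String`) that `cli.GenericFlag` expects in
+its `Value` field. A minimal sketch exercising it outside of flag parsing
+(illustrative only, not part of the library):
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/urfave/cli"
+)
+
+type genericType struct {
+	s string
+}
+
+func (g *genericType) Set(value string) error {
+	g.s = value
+	return nil
+}
+
+func (g *genericType) String() string {
+	return g.s
+}
+
+func main() {
+	// cli.Generic is the interface behind cli.GenericFlag's Value field.
+	var g cli.Generic = &genericType{}
+	if err := g.Set("wat"); err != nil {
+		panic(err)
+	}
+	fmt.Println(g) // prints "wat" via the String method
+}
+```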
+
+## Contribution Guidelines
+
+Feel free to put up a pull request to fix a bug or maybe add a feature. I will
+give it a code review and make sure that it does not break backwards
+compatibility. If I or any other collaborators agree that it is in line with
+the vision of the project, we will work with you to get the code into
+a mergeable state and merge it into the master branch.
+
+If you have contributed something significant to the project, we will most
+likely add you as a collaborator. As a collaborator you are given the ability
+to merge others' pull requests. It is very important that new code does not
+break existing code, so be careful about what code you do choose to merge.
+
+If you feel like you have contributed to the project but have not yet been
+added as a collaborator, we probably forgot to add you; please open an issue.
diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go
new file mode 100644
index 000000000..51fc45d87
--- /dev/null
+++ b/vendor/github.com/urfave/cli/app.go
@@ -0,0 +1,497 @@
+package cli
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "time"
+)
+
+var (
+ changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md"
+ appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
+ runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)
+
+ contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."
+
+	errInvalidActionType = NewExitError("ERROR invalid Action type. "+
+		fmt.Sprintf("Must be `func(*Context)` or `func(*Context) error`. %s ", contactSysadmin)+
+		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
+)
+
+// App is the main structure of a cli application. It is recommended that
+// an app be created with the cli.NewApp() function
+type App struct {
+ // The name of the program. Defaults to path.Base(os.Args[0])
+ Name string
+ // Full name of command for help, defaults to Name
+ HelpName string
+ // Description of the program.
+ Usage string
+ // Text to override the USAGE section of help
+ UsageText string
+ // Description of the program argument format.
+ ArgsUsage string
+ // Version of the program
+ Version string
+ // Description of the program
+ Description string
+ // List of commands to execute
+ Commands []Command
+ // List of flags to parse
+ Flags []Flag
+ // Boolean to enable bash completion commands
+ EnableBashCompletion bool
+ // Boolean to hide built-in help command
+ HideHelp bool
+ // Boolean to hide built-in version flag and the VERSION section of help
+ HideVersion bool
+	// Populated on app startup; only gettable through the Categories() method
+ categories CommandCategories
+ // An action to execute when the bash-completion flag is set
+ BashComplete BashCompleteFunc
+ // An action to execute before any subcommands are run, but after the context is ready
+ // If a non-nil error is returned, no subcommands are run
+ Before BeforeFunc
+	// An action to execute after any subcommands are run and have finished.
+	// It is run even if Action() panics
+ After AfterFunc
+
+ // The action to execute when no subcommands are specified
+ // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}`
+ // *Note*: support for the deprecated `Action` signature will be removed in a future version
+ Action interface{}
+
+ // Execute this function if the proper command cannot be found
+ CommandNotFound CommandNotFoundFunc
+	// Execute this function if a usage error occurs
+ OnUsageError OnUsageErrorFunc
+ // Compilation date
+ Compiled time.Time
+ // List of all authors who contributed
+ Authors []Author
+ // Copyright of the binary if any
+ Copyright string
+ // Name of Author (Note: Use App.Authors, this is deprecated)
+ Author string
+ // Email of Author (Note: Use App.Authors, this is deprecated)
+ Email string
+	// Writer is the writer to write output to
+ Writer io.Writer
+ // ErrWriter writes error output
+ ErrWriter io.Writer
+ // Other custom info
+ Metadata map[string]interface{}
+ // Carries a function which returns app specific info.
+ ExtraInfo func() map[string]string
+ // CustomAppHelpTemplate the text template for app help topic.
+ // cli.go uses text/template to render templates. You can
+ // render custom help text by setting this variable.
+ CustomAppHelpTemplate string
+
+ didSetup bool
+}
+
+// Tries to find out when this binary was compiled.
+// Returns the current time if it fails to find it.
+func compileTime() time.Time {
+ info, err := os.Stat(os.Args[0])
+ if err != nil {
+ return time.Now()
+ }
+ return info.ModTime()
+}
+
+// NewApp creates a new cli Application with some reasonable defaults for Name,
+// Usage, Version and Action.
+func NewApp() *App {
+ return &App{
+ Name: filepath.Base(os.Args[0]),
+ HelpName: filepath.Base(os.Args[0]),
+ Usage: "A new cli application",
+ UsageText: "",
+ Version: "0.0.0",
+ BashComplete: DefaultAppComplete,
+ Action: helpCommand.Action,
+ Compiled: compileTime(),
+ Writer: os.Stdout,
+ }
+}
+
+// Setup runs initialization code to ensure all data structures are ready for
+// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
+// will return early if setup has already happened.
+func (a *App) Setup() {
+ if a.didSetup {
+ return
+ }
+
+ a.didSetup = true
+
+ if a.Author != "" || a.Email != "" {
+ a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
+ }
+
+ newCmds := []Command{}
+ for _, c := range a.Commands {
+ if c.HelpName == "" {
+ c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
+ }
+ newCmds = append(newCmds, c)
+ }
+ a.Commands = newCmds
+
+ if a.Command(helpCommand.Name) == nil && !a.HideHelp {
+ a.Commands = append(a.Commands, helpCommand)
+ if (HelpFlag != BoolFlag{}) {
+ a.appendFlag(HelpFlag)
+ }
+ }
+
+ if !a.HideVersion {
+ a.appendFlag(VersionFlag)
+ }
+
+ a.categories = CommandCategories{}
+ for _, command := range a.Commands {
+ a.categories = a.categories.AddCommand(command.Category, command)
+ }
+ sort.Sort(a.categories)
+
+ if a.Metadata == nil {
+ a.Metadata = make(map[string]interface{})
+ }
+
+ if a.Writer == nil {
+ a.Writer = os.Stdout
+ }
+}
+
+// Run is the entry point to the cli app. Parses the arguments slice and routes
+// to the proper flag/args combination
+func (a *App) Run(arguments []string) (err error) {
+ a.Setup()
+
+ // handle the completion flag separately from the flagset since
+ // completion could be attempted after a flag, but before its value was put
+ // on the command line. this causes the flagset to interpret the completion
+ // flag name as the value of the flag before it which is undesirable
+ // note that we can only do this because the shell autocomplete function
+ // always appends the completion flag at the end of the command
+ shellComplete, arguments := checkShellCompleteFlag(a, arguments)
+
+ // parse flags
+ set, err := flagSet(a.Name, a.Flags)
+ if err != nil {
+ return err
+ }
+
+ set.SetOutput(ioutil.Discard)
+ err = set.Parse(arguments[1:])
+ nerr := normalizeFlags(a.Flags, set)
+ context := NewContext(a, set, nil)
+ if nerr != nil {
+ fmt.Fprintln(a.Writer, nerr)
+ ShowAppHelp(context)
+ return nerr
+ }
+ context.shellComplete = shellComplete
+
+ if checkCompletions(context) {
+ return nil
+ }
+
+ if err != nil {
+ if a.OnUsageError != nil {
+ err := a.OnUsageError(context, err, false)
+ HandleExitCoder(err)
+ return err
+ }
+ fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
+ ShowAppHelp(context)
+ return err
+ }
+
+ if !a.HideHelp && checkHelp(context) {
+ ShowAppHelp(context)
+ return nil
+ }
+
+ if !a.HideVersion && checkVersion(context) {
+ ShowVersion(context)
+ return nil
+ }
+
+ if a.After != nil {
+ defer func() {
+ if afterErr := a.After(context); afterErr != nil {
+ if err != nil {
+ err = NewMultiError(err, afterErr)
+ } else {
+ err = afterErr
+ }
+ }
+ }()
+ }
+
+ if a.Before != nil {
+ beforeErr := a.Before(context)
+ if beforeErr != nil {
+ ShowAppHelp(context)
+ HandleExitCoder(beforeErr)
+ err = beforeErr
+ return err
+ }
+ }
+
+ args := context.Args()
+ if args.Present() {
+ name := args.First()
+ c := a.Command(name)
+ if c != nil {
+ return c.Run(context)
+ }
+ }
+
+ if a.Action == nil {
+ a.Action = helpCommand.Action
+ }
+
+ // Run default Action
+ err = HandleAction(a.Action, context)
+
+ HandleExitCoder(err)
+ return err
+}
+
+// RunAndExitOnError calls .Run() and exits non-zero if an error was returned
+//
+// Deprecated: instead you should return an error that fulfills cli.ExitCoder
+// to cli.App.Run. This will cause the application to exit with the given error
+// code in the cli.ExitCoder
+func (a *App) RunAndExitOnError() {
+ if err := a.Run(os.Args); err != nil {
+ fmt.Fprintln(a.errWriter(), err)
+ OsExiter(1)
+ }
+}
+
+// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
+// generate command-specific flags
+func (a *App) RunAsSubcommand(ctx *Context) (err error) {
+ // append help to commands
+ if len(a.Commands) > 0 {
+ if a.Command(helpCommand.Name) == nil && !a.HideHelp {
+ a.Commands = append(a.Commands, helpCommand)
+ if (HelpFlag != BoolFlag{}) {
+ a.appendFlag(HelpFlag)
+ }
+ }
+ }
+
+ newCmds := []Command{}
+ for _, c := range a.Commands {
+ if c.HelpName == "" {
+ c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
+ }
+ newCmds = append(newCmds, c)
+ }
+ a.Commands = newCmds
+
+ // parse flags
+ set, err := flagSet(a.Name, a.Flags)
+ if err != nil {
+ return err
+ }
+
+ set.SetOutput(ioutil.Discard)
+ err = set.Parse(ctx.Args().Tail())
+ nerr := normalizeFlags(a.Flags, set)
+ context := NewContext(a, set, ctx)
+
+ if nerr != nil {
+ fmt.Fprintln(a.Writer, nerr)
+ fmt.Fprintln(a.Writer)
+ if len(a.Commands) > 0 {
+ ShowSubcommandHelp(context)
+ } else {
+ ShowCommandHelp(ctx, context.Args().First())
+ }
+ return nerr
+ }
+
+ if checkCompletions(context) {
+ return nil
+ }
+
+ if err != nil {
+ if a.OnUsageError != nil {
+ err = a.OnUsageError(context, err, true)
+ HandleExitCoder(err)
+ return err
+ }
+ fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
+ ShowSubcommandHelp(context)
+ return err
+ }
+
+ if len(a.Commands) > 0 {
+ if checkSubcommandHelp(context) {
+ return nil
+ }
+ } else {
+ if checkCommandHelp(ctx, context.Args().First()) {
+ return nil
+ }
+ }
+
+ if a.After != nil {
+ defer func() {
+ afterErr := a.After(context)
+ if afterErr != nil {
+ HandleExitCoder(err)
+ if err != nil {
+ err = NewMultiError(err, afterErr)
+ } else {
+ err = afterErr
+ }
+ }
+ }()
+ }
+
+ if a.Before != nil {
+ beforeErr := a.Before(context)
+ if beforeErr != nil {
+ HandleExitCoder(beforeErr)
+ err = beforeErr
+ return err
+ }
+ }
+
+ args := context.Args()
+ if args.Present() {
+ name := args.First()
+ c := a.Command(name)
+ if c != nil {
+ return c.Run(context)
+ }
+ }
+
+ // Run default Action
+ err = HandleAction(a.Action, context)
+
+ HandleExitCoder(err)
+ return err
+}
+
+// Command returns the named command on App. Returns nil if the command does not exist
+func (a *App) Command(name string) *Command {
+ for _, c := range a.Commands {
+ if c.HasName(name) {
+ return &c
+ }
+ }
+
+ return nil
+}
+
+// Categories returns a slice containing all the categories with the commands they contain
+func (a *App) Categories() CommandCategories {
+ return a.categories
+}
+
+// VisibleCategories returns a slice of categories and commands that are
+// Hidden=false
+func (a *App) VisibleCategories() []*CommandCategory {
+ ret := []*CommandCategory{}
+ for _, category := range a.categories {
+ if visible := func() *CommandCategory {
+ for _, command := range category.Commands {
+ if !command.Hidden {
+ return category
+ }
+ }
+ return nil
+ }(); visible != nil {
+ ret = append(ret, visible)
+ }
+ }
+ return ret
+}
+
+// VisibleCommands returns a slice of the Commands with Hidden=false
+func (a *App) VisibleCommands() []Command {
+ ret := []Command{}
+ for _, command := range a.Commands {
+ if !command.Hidden {
+ ret = append(ret, command)
+ }
+ }
+ return ret
+}
+
+// VisibleFlags returns a slice of the Flags with Hidden=false
+func (a *App) VisibleFlags() []Flag {
+ return visibleFlags(a.Flags)
+}
+
+func (a *App) hasFlag(flag Flag) bool {
+ for _, f := range a.Flags {
+ if flag == f {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (a *App) errWriter() io.Writer {
+
+ // When the app ErrWriter is nil use the package level one.
+ if a.ErrWriter == nil {
+ return ErrWriter
+ }
+
+ return a.ErrWriter
+}
+
+func (a *App) appendFlag(flag Flag) {
+ if !a.hasFlag(flag) {
+ a.Flags = append(a.Flags, flag)
+ }
+}
+
+// Author represents someone who has contributed to a cli project.
+type Author struct {
+	Name  string // The author's name
+	Email string // The author's email
+}
+
+// String makes Author comply with the Stringer interface, to allow an easy print in the templating process
+func (a Author) String() string {
+ e := ""
+ if a.Email != "" {
+ e = " <" + a.Email + ">"
+ }
+
+ return fmt.Sprintf("%v%v", a.Name, e)
+}
+
+// HandleAction attempts to figure out which Action signature was used. If
+// it's an ActionFunc or a func with the legacy signature for Action, the func
+// is run!
+func HandleAction(action interface{}, context *Context) (err error) {
+ if a, ok := action.(ActionFunc); ok {
+ return a(context)
+ } else if a, ok := action.(func(*Context) error); ok {
+ return a(context)
+ } else if a, ok := action.(func(*Context)); ok { // deprecated function signature
+ a(context)
+ return nil
+ } else {
+ return errInvalidActionType
+ }
+}
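+
+// handleActionSketch is a minimal illustrative sketch (not part of the
+// upstream API) of the dispatch above: HandleAction runs both the current
+// `func(*Context) error` signature and the deprecated `func(*Context)` one.
+func handleActionSketch(ctx *Context) {
+	current := func(c *Context) error {
+		fmt.Fprintln(c.App.Writer, "hello")
+		return nil
+	}
+	legacy := func(c *Context) { // deprecated signature, still accepted
+		fmt.Fprintln(c.App.Writer, "hello")
+	}
+	_ = HandleAction(current, ctx) // runs current, returns its error
+	_ = HandleAction(legacy, ctx)  // runs legacy, returns nil
+}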
diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go
new file mode 100644
index 000000000..1a6055023
--- /dev/null
+++ b/vendor/github.com/urfave/cli/category.go
@@ -0,0 +1,44 @@
+package cli
+
+// CommandCategories is a slice of *CommandCategory.
+type CommandCategories []*CommandCategory
+
+// CommandCategory is a category containing commands.
+type CommandCategory struct {
+ Name string
+ Commands Commands
+}
+
+func (c CommandCategories) Less(i, j int) bool {
+ return c[i].Name < c[j].Name
+}
+
+func (c CommandCategories) Len() int {
+ return len(c)
+}
+
+func (c CommandCategories) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
+
+// AddCommand adds a command to a category.
+func (c CommandCategories) AddCommand(category string, command Command) CommandCategories {
+ for _, commandCategory := range c {
+ if commandCategory.Name == category {
+ commandCategory.Commands = append(commandCategory.Commands, command)
+ return c
+ }
+ }
+ return append(c, &CommandCategory{Name: category, Commands: []Command{command}})
+}
+
+// VisibleCommands returns a slice of the Commands with Hidden=false
+func (c *CommandCategory) VisibleCommands() []Command {
+ ret := []Command{}
+ for _, command := range c.Commands {
+ if !command.Hidden {
+ ret = append(ret, command)
+ }
+ }
+ return ret
+}
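+
+// addCommandSketch is a minimal illustrative sketch (not part of the upstream
+// API): AddCommand behaves like append, returning the updated slice, so the
+// caller must keep the result. Adding to an existing category name reuses it.
+func addCommandSketch() CommandCategories {
+	c := CommandCategories{}
+	c = c.AddCommand("motion", Command{Name: "doo"})
+	c = c.AddCommand("motion", Command{Name: "wop"}) // appended to "motion"
+	return c                                         // one category, two commands
+}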
diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go
new file mode 100644
index 000000000..90c07eb8e
--- /dev/null
+++ b/vendor/github.com/urfave/cli/cli.go
@@ -0,0 +1,22 @@
+// Package cli provides a minimal framework for creating and organizing command line
+// Go applications. cli is designed to be easy to understand and write; the simplest
+// cli application can be written as follows:
+//	func main() {
+//		cli.NewApp().Run(os.Args)
+//	}
+//
+// Of course this application does not do much, so let's make this an actual application:
+//	func main() {
+//		app := cli.NewApp()
+//		app.Name = "greet"
+//		app.Usage = "say a greeting"
+//		app.Action = func(c *cli.Context) error {
+//			println("Greetings")
+//			return nil
+//		}
+//
+//		app.Run(os.Args)
+//	}
+package cli
+
+//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go
diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go
new file mode 100644
index 000000000..23de2944b
--- /dev/null
+++ b/vendor/github.com/urfave/cli/command.go
@@ -0,0 +1,304 @@
+package cli
+
+import (
+ "fmt"
+ "io/ioutil"
+ "sort"
+ "strings"
+)
+
+// Command is a subcommand for a cli.App.
+type Command struct {
+ // The name of the command
+ Name string
+ // short name of the command. Typically one character (deprecated, use `Aliases`)
+ ShortName string
+ // A list of aliases for the command
+ Aliases []string
+ // A short description of the usage of this command
+ Usage string
+ // Custom text to show on USAGE section of help
+ UsageText string
+ // A longer explanation of how the command works
+ Description string
+ // A short description of the arguments of this command
+ ArgsUsage string
+ // The category the command is part of
+ Category string
+ // The function to call when checking for bash command completions
+ BashComplete BashCompleteFunc
+ // An action to execute before any sub-subcommands are run, but after the context is ready
+ // If a non-nil error is returned, no sub-subcommands are run
+ Before BeforeFunc
+	// An action to execute after any subcommands are run and have finished.
+	// It is run even if Action() panics
+ After AfterFunc
+ // The function to call when this command is invoked
+ Action interface{}
+ // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind
+ // of deprecation period has passed, maybe?
+
+ // Execute this function if a usage error occurs.
+ OnUsageError OnUsageErrorFunc
+ // List of child commands
+ Subcommands Commands
+ // List of flags to parse
+ Flags []Flag
+ // Treat all flags as normal arguments if true
+ SkipFlagParsing bool
+	// Skip argument reordering which attempts to move flags before arguments,
+	// but only works if all flags appear after all arguments. This behavior was
+	// removed in version 2 since it only works under specific conditions, so we
+	// backport it here by exposing it as an option for compatibility.
+ SkipArgReorder bool
+ // Boolean to hide built-in help command
+ HideHelp bool
+ // Boolean to hide this command from help or completion
+ Hidden bool
+
+ // Full name of command for help, defaults to full command name, including parent commands.
+ HelpName string
+ commandNamePath []string
+
+ // CustomHelpTemplate the text template for the command help topic.
+ // cli.go uses text/template to render templates. You can
+ // render custom help text by setting this variable.
+ CustomHelpTemplate string
+}
+
+type CommandsByName []Command
+
+func (c CommandsByName) Len() int {
+ return len(c)
+}
+
+func (c CommandsByName) Less(i, j int) bool {
+ return c[i].Name < c[j].Name
+}
+
+func (c CommandsByName) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
+
+// FullName returns the full name of the command.
+// For subcommands this ensures that parent commands are part of the command path
+func (c Command) FullName() string {
+ if c.commandNamePath == nil {
+ return c.Name
+ }
+ return strings.Join(c.commandNamePath, " ")
+}
+
+// Commands is a slice of Command
+type Commands []Command
+
+// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags
+func (c Command) Run(ctx *Context) (err error) {
+ if len(c.Subcommands) > 0 {
+ return c.startApp(ctx)
+ }
+
+ if !c.HideHelp && (HelpFlag != BoolFlag{}) {
+ // append help to flags
+ c.Flags = append(
+ c.Flags,
+ HelpFlag,
+ )
+ }
+
+ set, err := flagSet(c.Name, c.Flags)
+ if err != nil {
+ return err
+ }
+ set.SetOutput(ioutil.Discard)
+
+ if c.SkipFlagParsing {
+ err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...))
+ } else if !c.SkipArgReorder {
+ firstFlagIndex := -1
+ terminatorIndex := -1
+ for index, arg := range ctx.Args() {
+ if arg == "--" {
+ terminatorIndex = index
+ break
+ } else if arg == "-" {
+ // Do nothing. A dash alone is not really a flag.
+ continue
+ } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 {
+ firstFlagIndex = index
+ }
+ }
+
+ if firstFlagIndex > -1 {
+ args := ctx.Args()
+ regularArgs := make([]string, len(args[1:firstFlagIndex]))
+ copy(regularArgs, args[1:firstFlagIndex])
+
+ var flagArgs []string
+ if terminatorIndex > -1 {
+ flagArgs = args[firstFlagIndex:terminatorIndex]
+ regularArgs = append(regularArgs, args[terminatorIndex:]...)
+ } else {
+ flagArgs = args[firstFlagIndex:]
+ }
+
+ err = set.Parse(append(flagArgs, regularArgs...))
+ } else {
+ err = set.Parse(ctx.Args().Tail())
+ }
+ } else {
+ err = set.Parse(ctx.Args().Tail())
+ }
+
+ nerr := normalizeFlags(c.Flags, set)
+ if nerr != nil {
+ fmt.Fprintln(ctx.App.Writer, nerr)
+ fmt.Fprintln(ctx.App.Writer)
+ ShowCommandHelp(ctx, c.Name)
+ return nerr
+ }
+
+ context := NewContext(ctx.App, set, ctx)
+ context.Command = c
+ if checkCommandCompletions(context, c.Name) {
+ return nil
+ }
+
+ if err != nil {
+ if c.OnUsageError != nil {
+ err := c.OnUsageError(context, err, false)
+ HandleExitCoder(err)
+ return err
+ }
+ fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error())
+ fmt.Fprintln(context.App.Writer)
+ ShowCommandHelp(context, c.Name)
+ return err
+ }
+
+ if checkCommandHelp(context, c.Name) {
+ return nil
+ }
+
+ if c.After != nil {
+ defer func() {
+ afterErr := c.After(context)
+ if afterErr != nil {
+ HandleExitCoder(err)
+ if err != nil {
+ err = NewMultiError(err, afterErr)
+ } else {
+ err = afterErr
+ }
+ }
+ }()
+ }
+
+ if c.Before != nil {
+ err = c.Before(context)
+ if err != nil {
+ ShowCommandHelp(context, c.Name)
+ HandleExitCoder(err)
+ return err
+ }
+ }
+
+ if c.Action == nil {
+ c.Action = helpSubcommand.Action
+ }
+
+ err = HandleAction(c.Action, context)
+
+ if err != nil {
+ HandleExitCoder(err)
+ }
+ return err
+}
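+
+// reorderSketch is a minimal illustrative sketch (not part of the upstream
+// API) of the argument reordering in Run above: with SkipArgReorder unset,
+// flags found after positional arguments are moved in front of them before
+// parsing, so both invocations below are equivalent.
+func reorderSketch(app *App) {
+	app.Run([]string{"contrive", "doo", "somearg", "--forever"})
+	app.Run([]string{"contrive", "doo", "--forever", "somearg"})
+}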
+
+// Names returns the names including short names and aliases.
+func (c Command) Names() []string {
+ names := []string{c.Name}
+
+ if c.ShortName != "" {
+ names = append(names, c.ShortName)
+ }
+
+ return append(names, c.Aliases...)
+}
+
+// HasName returns true if Command.Name, Command.ShortName or an alias matches the given name
+func (c Command) HasName(name string) bool {
+ for _, n := range c.Names() {
+ if n == name {
+ return true
+ }
+ }
+ return false
+}
+
+func (c Command) startApp(ctx *Context) error {
+ app := NewApp()
+ app.Metadata = ctx.App.Metadata
+ // set the name and usage
+ app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
+	if c.HelpName != "" {
+		app.HelpName = c.HelpName
+	} else {
+		app.HelpName = app.Name
+	}
+
+ app.Usage = c.Usage
+ app.Description = c.Description
+ app.ArgsUsage = c.ArgsUsage
+
+ // set CommandNotFound
+ app.CommandNotFound = ctx.App.CommandNotFound
+ app.CustomAppHelpTemplate = c.CustomHelpTemplate
+
+ // set the flags and commands
+ app.Commands = c.Subcommands
+ app.Flags = c.Flags
+ app.HideHelp = c.HideHelp
+
+ app.Version = ctx.App.Version
+ app.HideVersion = ctx.App.HideVersion
+ app.Compiled = ctx.App.Compiled
+ app.Author = ctx.App.Author
+ app.Email = ctx.App.Email
+ app.Writer = ctx.App.Writer
+ app.ErrWriter = ctx.App.ErrWriter
+
+ app.categories = CommandCategories{}
+ for _, command := range c.Subcommands {
+ app.categories = app.categories.AddCommand(command.Category, command)
+ }
+
+ sort.Sort(app.categories)
+
+ // bash completion
+ app.EnableBashCompletion = ctx.App.EnableBashCompletion
+ if c.BashComplete != nil {
+ app.BashComplete = c.BashComplete
+ }
+
+ // set the actions
+ app.Before = c.Before
+ app.After = c.After
+ if c.Action != nil {
+ app.Action = c.Action
+ } else {
+ app.Action = helpSubcommand.Action
+ }
+ app.OnUsageError = c.OnUsageError
+
+ for index, cc := range app.Commands {
+ app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
+ }
+
+ return app.RunAsSubcommand(ctx)
+}
+
+// VisibleFlags returns a slice of the Flags with Hidden=false
+func (c Command) VisibleFlags() []Flag {
+ return visibleFlags(c.Flags)
+}
diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go
new file mode 100644
index 000000000..db94191e2
--- /dev/null
+++ b/vendor/github.com/urfave/cli/context.go
@@ -0,0 +1,278 @@
+package cli
+
+import (
+ "errors"
+ "flag"
+ "reflect"
+ "strings"
+ "syscall"
+)
+
+// Context is a type that is passed through to
+// each Handler action in a cli application. Context
+// can be used to retrieve context-specific Args and
+// parsed command-line options.
+type Context struct {
+ App *App
+ Command Command
+ shellComplete bool
+ flagSet *flag.FlagSet
+ setFlags map[string]bool
+ parentContext *Context
+}
+
+// NewContext creates a new context. For use when invoking an App or Command action.
+func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
+ c := &Context{App: app, flagSet: set, parentContext: parentCtx}
+
+ if parentCtx != nil {
+ c.shellComplete = parentCtx.shellComplete
+ }
+
+ return c
+}
+
+// NumFlags returns the number of flags set
+func (c *Context) NumFlags() int {
+ return c.flagSet.NFlag()
+}
+
+// Set sets a context flag to a value.
+func (c *Context) Set(name, value string) error {
+ c.setFlags = nil
+ return c.flagSet.Set(name, value)
+}
+
+// GlobalSet sets a context flag to a value on the global flagset
+func (c *Context) GlobalSet(name, value string) error {
+ globalContext(c).setFlags = nil
+ return globalContext(c).flagSet.Set(name, value)
+}
+
+// IsSet determines if the flag was actually set
+func (c *Context) IsSet(name string) bool {
+ if c.setFlags == nil {
+ c.setFlags = make(map[string]bool)
+
+ c.flagSet.Visit(func(f *flag.Flag) {
+ c.setFlags[f.Name] = true
+ })
+
+ c.flagSet.VisitAll(func(f *flag.Flag) {
+ if _, ok := c.setFlags[f.Name]; ok {
+ return
+ }
+ c.setFlags[f.Name] = false
+ })
+
+ // XXX hack to support IsSet for flags with EnvVar
+ //
+ // There isn't an easy way to do this with the current implementation since
+ // whether a flag was set via an environment variable is very difficult to
+ // determine here. Instead, we intend to introduce a backwards incompatible
+ // change in version 2 to add `IsSet` to the Flag interface to push the
+ // responsibility closer to where the information required to determine
+ // whether a flag is set by non-standard means such as environment
+		// variables is available.
+ //
+ // See https://github.com/urfave/cli/issues/294 for additional discussion
+ flags := c.Command.Flags
+ if c.Command.Name == "" { // cannot == Command{} since it contains slice types
+ if c.App != nil {
+ flags = c.App.Flags
+ }
+ }
+ for _, f := range flags {
+ eachName(f.GetName(), func(name string) {
+ if isSet, ok := c.setFlags[name]; isSet || !ok {
+ return
+ }
+
+ val := reflect.ValueOf(f)
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+
+ envVarValue := val.FieldByName("EnvVar")
+ if !envVarValue.IsValid() {
+ return
+ }
+
+ eachName(envVarValue.String(), func(envVar string) {
+ envVar = strings.TrimSpace(envVar)
+ if _, ok := syscall.Getenv(envVar); ok {
+ c.setFlags[name] = true
+ return
+ }
+ })
+ })
+ }
+ }
+
+ return c.setFlags[name]
+}
+
+// GlobalIsSet determines if the global flag was actually set
+func (c *Context) GlobalIsSet(name string) bool {
+ ctx := c
+ if ctx.parentContext != nil {
+ ctx = ctx.parentContext
+ }
+
+ for ; ctx != nil; ctx = ctx.parentContext {
+ if ctx.IsSet(name) {
+ return true
+ }
+ }
+ return false
+}
+
+// FlagNames returns a slice of flag names used in this context.
+func (c *Context) FlagNames() (names []string) {
+ for _, flag := range c.Command.Flags {
+ name := strings.Split(flag.GetName(), ",")[0]
+ if name == "help" {
+ continue
+ }
+ names = append(names, name)
+ }
+ return
+}
+
+// GlobalFlagNames returns a slice of global flag names used by the app.
+func (c *Context) GlobalFlagNames() (names []string) {
+ for _, flag := range c.App.Flags {
+ name := strings.Split(flag.GetName(), ",")[0]
+ if name == "help" || name == "version" {
+ continue
+ }
+ names = append(names, name)
+ }
+ return
+}
+
+// Parent returns the parent context, if any
+func (c *Context) Parent() *Context {
+ return c.parentContext
+}
+
+// value returns the value of the flag corresponding to `name`
+func (c *Context) value(name string) interface{} {
+ return c.flagSet.Lookup(name).Value.(flag.Getter).Get()
+}
+
+// Args contains the app's console arguments
+type Args []string
+
+// Args returns the command line arguments associated with the context.
+func (c *Context) Args() Args {
+ args := Args(c.flagSet.Args())
+ return args
+}
+
+// NArg returns the number of command line arguments.
+func (c *Context) NArg() int {
+ return len(c.Args())
+}
+
+// Get returns the nth argument, or else a blank string
+func (a Args) Get(n int) string {
+ if len(a) > n {
+ return a[n]
+ }
+ return ""
+}
+
+// First returns the first argument, or else a blank string
+func (a Args) First() string {
+ return a.Get(0)
+}
+
+// Tail returns the rest of the arguments (not the first one)
+// or else an empty string slice
+func (a Args) Tail() []string {
+ if len(a) >= 2 {
+ return []string(a)[1:]
+ }
+ return []string{}
+}
+
+// Present checks if there are any arguments present
+func (a Args) Present() bool {
+ return len(a) != 0
+}
+
+// Swap swaps arguments at the given indexes
+func (a Args) Swap(from, to int) error {
+ if from >= len(a) || to >= len(a) {
+ return errors.New("index out of range")
+ }
+ a[from], a[to] = a[to], a[from]
+ return nil
+}
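+
+// argsSketch is a minimal illustrative sketch (not part of the upstream API)
+// of the Args accessors above: Get is bounds-checked, First is Get(0), and
+// Tail drops the first element.
+func argsSketch() {
+	a := Args{"doo", "wop", "--forever"}
+	_ = a.Get(5)    // "" (out of range yields a blank string, not a panic)
+	_ = a.First()   // "doo"
+	_ = a.Tail()    // []string{"wop", "--forever"}
+	_ = a.Present() // true
+}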
+
+func globalContext(ctx *Context) *Context {
+ if ctx == nil {
+ return nil
+ }
+
+ for {
+ if ctx.parentContext == nil {
+ return ctx
+ }
+ ctx = ctx.parentContext
+ }
+}
+
+func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
+ if ctx.parentContext != nil {
+ ctx = ctx.parentContext
+ }
+ for ; ctx != nil; ctx = ctx.parentContext {
+ if f := ctx.flagSet.Lookup(name); f != nil {
+ return ctx.flagSet
+ }
+ }
+ return nil
+}
+
+func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
+ switch ff.Value.(type) {
+ case *StringSlice:
+ default:
+ set.Set(name, ff.Value.String())
+ }
+}
+
+func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
+ visited := make(map[string]bool)
+ set.Visit(func(f *flag.Flag) {
+ visited[f.Name] = true
+ })
+ for _, f := range flags {
+ parts := strings.Split(f.GetName(), ",")
+ if len(parts) == 1 {
+ continue
+ }
+ var ff *flag.Flag
+ for _, name := range parts {
+ name = strings.Trim(name, " ")
+ if visited[name] {
+ if ff != nil {
+ return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
+ }
+ ff = set.Lookup(name)
+ }
+ }
+ if ff == nil {
+ continue
+ }
+ for _, name := range parts {
+ name = strings.Trim(name, " ")
+ if !visited[name] {
+ copyFlag(name, ff, set)
+ }
+ }
+ }
+ return nil
+}
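+
+// normalizeFlagsSketch is a minimal illustrative sketch (not part of the
+// upstream API): after parsing only the short form of a two-form flag,
+// normalizeFlags copies the parsed value onto the unset alias so lookups by
+// either name agree.
+func normalizeFlagsSketch() error {
+	flags := []Flag{StringFlag{Name: "dance-move, d"}}
+	set, err := flagSet("contrive", flags)
+	if err != nil {
+		return err
+	}
+	if err := set.Parse([]string{"-d", "twist"}); err != nil {
+		return err
+	}
+	return normalizeFlags(flags, set) // syncs "dance-move" with "d"
+}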
diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go
new file mode 100644
index 000000000..562b2953c
--- /dev/null
+++ b/vendor/github.com/urfave/cli/errors.go
@@ -0,0 +1,115 @@
+package cli
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// OsExiter is the function used when the app exits. If not overridden, it defaults to os.Exit.
+var OsExiter = os.Exit
+
+// ErrWriter is used to write errors to the user. This can be anything
+// implementing the io.Writer interface and defaults to os.Stderr.
+var ErrWriter io.Writer = os.Stderr
+
+// MultiError is an error that wraps multiple errors.
+type MultiError struct {
+ Errors []error
+}
+
+// NewMultiError creates a new MultiError. Pass in one or more errors.
+func NewMultiError(err ...error) MultiError {
+ return MultiError{Errors: err}
+}
+
+// Error implements the error interface.
+func (m MultiError) Error() string {
+ errs := make([]string, len(m.Errors))
+ for i, err := range m.Errors {
+ errs[i] = err.Error()
+ }
+
+ return strings.Join(errs, "\n")
+}
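+
+// multiErrorSketch is a minimal illustrative sketch (not part of the upstream
+// API): the Error method above joins the wrapped messages with newlines.
+func multiErrorSketch() string {
+	err := NewMultiError(fmt.Errorf("first"), fmt.Errorf("second"))
+	return err.Error() // "first\nsecond"
+}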
+
+type ErrorFormatter interface {
+ Format(s fmt.State, verb rune)
+}
+
+// ExitCoder is the interface checked by `App` and `Command` for a custom exit
+// code
+type ExitCoder interface {
+ error
+ ExitCode() int
+}
+
+// ExitError fulfills both the builtin `error` interface and `ExitCoder`
+type ExitError struct {
+ exitCode int
+ message interface{}
+}
+
+// NewExitError makes a new *ExitError
+func NewExitError(message interface{}, exitCode int) *ExitError {
+ return &ExitError{
+ exitCode: exitCode,
+ message: message,
+ }
+}
+
+// Error returns the string message, fulfilling the interface required by
+// `error`
+func (ee *ExitError) Error() string {
+ return fmt.Sprintf("%v", ee.message)
+}
+
+// ExitCode returns the exit code, fulfilling the interface required by
+// `ExitCoder`
+func (ee *ExitError) ExitCode() int {
+ return ee.exitCode
+}
+
+// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if
+// so prints the error to ErrWriter (if the message is non-empty) and calls
+// OsExiter with the given exit code. If the given error is a MultiError, then
+// this func is called on all members of the Errors slice and calls OsExiter
+// with the last exit code.
+func HandleExitCoder(err error) {
+ if err == nil {
+ return
+ }
+
+ if exitErr, ok := err.(ExitCoder); ok {
+ if err.Error() != "" {
+ if _, ok := exitErr.(ErrorFormatter); ok {
+ fmt.Fprintf(ErrWriter, "%+v\n", err)
+ } else {
+ fmt.Fprintln(ErrWriter, err)
+ }
+ }
+ OsExiter(exitErr.ExitCode())
+ return
+ }
+
+ if multiErr, ok := err.(MultiError); ok {
+ code := handleMultiError(multiErr)
+ OsExiter(code)
+ return
+ }
+}
+
+func handleMultiError(multiErr MultiError) int {
+ code := 1
+ for _, merr := range multiErr.Errors {
+ if multiErr2, ok := merr.(MultiError); ok {
+ code = handleMultiError(multiErr2)
+ } else {
+ fmt.Fprintln(ErrWriter, merr)
+ if exitErr, ok := merr.(ExitCoder); ok {
+ code = exitErr.ExitCode()
+ }
+ }
+ }
+ return code
+}
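+
+// exitSketch is a minimal illustrative sketch (not part of the upstream API):
+// HandleExitCoder prints the wrapped message to ErrWriter and calls OsExiter
+// with the wrapped code, so with the default OsExiter this call would
+// terminate the process with status 86.
+func exitSketch() {
+	HandleExitCoder(NewExitError("ohwell", 86))
+}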
diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go
new file mode 100644
index 000000000..877ff3523
--- /dev/null
+++ b/vendor/github.com/urfave/cli/flag.go
@@ -0,0 +1,799 @@
+package cli
+
+import (
+ "flag"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+)
+
+const defaultPlaceholder = "value"
+
+// BashCompletionFlag enables bash-completion for all commands and subcommands
+var BashCompletionFlag Flag = BoolFlag{
+ Name: "generate-bash-completion",
+ Hidden: true,
+}
+
+// VersionFlag prints the version for the application
+var VersionFlag Flag = BoolFlag{
+ Name: "version, v",
+ Usage: "print the version",
+}
+
+// HelpFlag prints the help for all commands and subcommands.
+// Set to the zero value (BoolFlag{}) to disable the flag; the help subcommand
+// is still added unless HideHelp is set to true.
+var HelpFlag Flag = BoolFlag{
+ Name: "help, h",
+ Usage: "show help",
+}
+
+// FlagStringer converts a flag definition to a string. This is used by help
+// to display a flag.
+var FlagStringer FlagStringFunc = stringifyFlag
+
+// FlagsByName is a slice of Flag.
+type FlagsByName []Flag
+
+func (f FlagsByName) Len() int {
+ return len(f)
+}
+
+func (f FlagsByName) Less(i, j int) bool {
+ return f[i].GetName() < f[j].GetName()
+}
+
+func (f FlagsByName) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// Flag is a common interface related to parsing flags in cli.
+// For more advanced flag parsing techniques, it is recommended that
+// this interface be implemented.
+type Flag interface {
+ fmt.Stringer
+ // Apply Flag settings to the given flag set
+ Apply(*flag.FlagSet)
+ GetName() string
+}
+
+// errorableFlag is an interface that allows us to return errors during apply;
+// it allows flags defined in this library to return errors in a backwards-compatible fashion
+// TODO remove in v2 and modify the existing Flag interface to return errors
+type errorableFlag interface {
+ Flag
+
+ ApplyWithError(*flag.FlagSet) error
+}
+
+func flagSet(name string, flags []Flag) (*flag.FlagSet, error) {
+ set := flag.NewFlagSet(name, flag.ContinueOnError)
+
+ for _, f := range flags {
+ //TODO remove in v2 when errorableFlag is removed
+ if ef, ok := f.(errorableFlag); ok {
+ if err := ef.ApplyWithError(set); err != nil {
+ return nil, err
+ }
+ } else {
+ f.Apply(set)
+ }
+ }
+ return set, nil
+}
+
+func eachName(longName string, fn func(string)) {
+ parts := strings.Split(longName, ",")
+ for _, name := range parts {
+ name = strings.Trim(name, " ")
+ fn(name)
+ }
+}
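+
+// eachNameSketch is a minimal illustrative sketch (not part of the upstream
+// API): a flag Name like "dance-move, d" fans out to each trimmed,
+// comma-separated form.
+func eachNameSketch() []string {
+	names := []string{}
+	eachName("dance-move, d", func(name string) {
+		names = append(names, name)
+	})
+	return names // []string{"dance-move", "d"}
+}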
+
+// Generic is a generic parseable type identified by a specific flag
+type Generic interface {
+ Set(value string) error
+ String() string
+}
+
+// Apply takes the flagset and calls Set on the generic flag with the value
+// provided by the user for parsing by the flag
+// Ignores parsing errors
+func (f GenericFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError takes the flagset and calls Set on the generic flag with the value
+// provided by the user for parsing by the flag
+func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error {
+ val := f.Value
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ if err := val.Set(envVal); err != nil {
+ return fmt.Errorf("could not parse %s as value for flag %s: %s", envVal, f.Name, err)
+ }
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ set.Var(f.Value, name, f.Usage)
+ })
+
+ return nil
+}
+
+// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter
+type StringSlice []string
+
+// Set appends the string value to the list of values
+func (f *StringSlice) Set(value string) error {
+ *f = append(*f, value)
+ return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f *StringSlice) String() string {
+ return fmt.Sprintf("%s", *f)
+}
+
+// Value returns the slice of strings set by this flag
+func (f *StringSlice) Value() []string {
+ return *f
+}
+
+// Get returns the slice of strings set by this flag
+func (f *StringSlice) Get() interface{} {
+ return *f
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f StringSliceFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ newVal := &StringSlice{}
+ for _, s := range strings.Split(envVal, ",") {
+ s = strings.TrimSpace(s)
+ if err := newVal.Set(s); err != nil {
+ return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err)
+ }
+ }
+ f.Value = newVal
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Value == nil {
+ f.Value = &StringSlice{}
+ }
+ set.Var(f.Value, name, f.Usage)
+ })
+
+ return nil
+}
+
+// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter
+type IntSlice []int
+
+// Set parses the value into an integer and appends it to the list of values
+func (f *IntSlice) Set(value string) error {
+ tmp, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ *f = append(*f, tmp)
+ return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f *IntSlice) String() string {
+ return fmt.Sprintf("%#v", *f)
+}
+
+// Value returns the slice of ints set by this flag
+func (f *IntSlice) Value() []int {
+ return *f
+}
+
+// Get returns the slice of ints set by this flag
+func (f *IntSlice) Get() interface{} {
+ return *f
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f IntSliceFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ newVal := &IntSlice{}
+ for _, s := range strings.Split(envVal, ",") {
+ s = strings.TrimSpace(s)
+ if err := newVal.Set(s); err != nil {
+ return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err)
+ }
+ }
+ f.Value = newVal
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Value == nil {
+ f.Value = &IntSlice{}
+ }
+ set.Var(f.Value, name, f.Usage)
+ })
+
+ return nil
+}
+
+// Int64Slice is an opaque type for []int64 to satisfy flag.Value and flag.Getter
+type Int64Slice []int64
+
+// Set parses the value into an integer and appends it to the list of values
+func (f *Int64Slice) Set(value string) error {
+ tmp, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ *f = append(*f, tmp)
+ return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f *Int64Slice) String() string {
+ return fmt.Sprintf("%#v", *f)
+}
+
+// Value returns the slice of int64s set by this flag
+func (f *Int64Slice) Value() []int64 {
+ return *f
+}
+
+// Get returns the slice of int64s set by this flag
+func (f *Int64Slice) Get() interface{} {
+ return *f
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f Int64SliceFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ newVal := &Int64Slice{}
+ for _, s := range strings.Split(envVal, ",") {
+ s = strings.TrimSpace(s)
+ if err := newVal.Set(s); err != nil {
+ return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err)
+ }
+ }
+ f.Value = newVal
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Value == nil {
+ f.Value = &Int64Slice{}
+ }
+ set.Var(f.Value, name, f.Usage)
+ })
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f BoolFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error {
+ val := false
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ if envVal == "" {
+ val = false
+ break
+ }
+
+ envValBool, err := strconv.ParseBool(envVal)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err)
+ }
+
+ val = envValBool
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.BoolVar(f.Destination, name, val, f.Usage)
+ return
+ }
+ set.Bool(name, val, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f BoolTFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error {
+ val := true
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ if envVal == "" {
+ val = false
+ break
+ }
+
+ envValBool, err := strconv.ParseBool(envVal)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err)
+ }
+
+ val = envValBool
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.BoolVar(f.Destination, name, val, f.Usage)
+ return
+ }
+ set.Bool(name, val, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f StringFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f StringFlag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ f.Value = envVal
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.StringVar(f.Destination, name, f.Value, f.Usage)
+ return
+ }
+ set.String(name, f.Value, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f IntFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f IntFlag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ envValInt, err := strconv.ParseInt(envVal, 0, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err)
+ }
+ f.Value = int(envValInt)
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.IntVar(f.Destination, name, f.Value, f.Usage)
+ return
+ }
+ set.Int(name, f.Value, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f Int64Flag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ envValInt, err := strconv.ParseInt(envVal, 0, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err)
+ }
+
+ f.Value = envValInt
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.Int64Var(f.Destination, name, f.Value, f.Usage)
+ return
+ }
+ set.Int64(name, f.Value, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f UintFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f UintFlag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ envValInt, err := strconv.ParseUint(envVal, 0, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err)
+ }
+
+ f.Value = uint(envValInt)
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.UintVar(f.Destination, name, f.Value, f.Usage)
+ return
+ }
+ set.Uint(name, f.Value, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f Uint64Flag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ envValInt, err := strconv.ParseUint(envVal, 0, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err)
+ }
+
+ f.Value = uint64(envValInt)
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.Uint64Var(f.Destination, name, f.Value, f.Usage)
+ return
+ }
+ set.Uint64(name, f.Value, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f DurationFlag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+ envValDuration, err := time.ParseDuration(envVal)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err)
+ }
+
+ f.Value = envValDuration
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.DurationVar(f.Destination, name, f.Value, f.Usage)
+ return
+ }
+ set.Duration(name, f.Value, f.Usage)
+ })
+
+ return nil
+}
+
+// Apply populates the flag given the flag set and environment
+// Ignores errors
+func (f Float64Flag) Apply(set *flag.FlagSet) {
+ f.ApplyWithError(set)
+}
+
+// ApplyWithError populates the flag given the flag set and environment
+func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error {
+ if f.EnvVar != "" {
+ for _, envVar := range strings.Split(f.EnvVar, ",") {
+ envVar = strings.TrimSpace(envVar)
+ if envVal, ok := syscall.Getenv(envVar); ok {
+				envValFloat, err := strconv.ParseFloat(envVal, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err)
+ }
+
+ f.Value = float64(envValFloat)
+ break
+ }
+ }
+ }
+
+ eachName(f.Name, func(name string) {
+ if f.Destination != nil {
+ set.Float64Var(f.Destination, name, f.Value, f.Usage)
+ return
+ }
+ set.Float64(name, f.Value, f.Usage)
+ })
+
+ return nil
+}
+
+func visibleFlags(fl []Flag) []Flag {
+ visible := []Flag{}
+ for _, flag := range fl {
+ field := flagValue(flag).FieldByName("Hidden")
+ if !field.IsValid() || !field.Bool() {
+ visible = append(visible, flag)
+ }
+ }
+ return visible
+}
+
+func prefixFor(name string) (prefix string) {
+ if len(name) == 1 {
+ prefix = "-"
+ } else {
+ prefix = "--"
+ }
+
+ return
+}
+
+// Returns the placeholder, if any, and the unquoted usage string.
+func unquoteUsage(usage string) (string, string) {
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name := usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break
+ }
+ }
+ return "", usage
+}
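+
+// unquoteUsageSketch is a minimal illustrative sketch (not part of the
+// upstream API): a backquoted word in a usage string becomes the placeholder
+// and the backquotes are stripped from the usage text.
+func unquoteUsageSketch() (string, string) {
+	// returns ("FILE", "load configuration from FILE")
+	return unquoteUsage("load configuration from `FILE`")
+}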
+
+func prefixedNames(fullName, placeholder string) string {
+ var prefixed string
+ parts := strings.Split(fullName, ",")
+ for i, name := range parts {
+ name = strings.Trim(name, " ")
+ prefixed += prefixFor(name) + name
+ if placeholder != "" {
+ prefixed += " " + placeholder
+ }
+ if i < len(parts)-1 {
+ prefixed += ", "
+ }
+ }
+ return prefixed
+}
+
+func withEnvHint(envVar, str string) string {
+ envText := ""
+ if envVar != "" {
+ prefix := "$"
+ suffix := ""
+ sep := ", $"
+ if runtime.GOOS == "windows" {
+ prefix = "%"
+ suffix = "%"
+ sep = "%, %"
+ }
+ envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix)
+ }
+ return str + envText
+}
+
+func flagValue(f Flag) reflect.Value {
+ fv := reflect.ValueOf(f)
+ for fv.Kind() == reflect.Ptr {
+ fv = reflect.Indirect(fv)
+ }
+ return fv
+}
+
+func stringifyFlag(f Flag) string {
+ fv := flagValue(f)
+
+ switch f.(type) {
+ case IntSliceFlag:
+ return withEnvHint(fv.FieldByName("EnvVar").String(),
+ stringifyIntSliceFlag(f.(IntSliceFlag)))
+ case Int64SliceFlag:
+ return withEnvHint(fv.FieldByName("EnvVar").String(),
+ stringifyInt64SliceFlag(f.(Int64SliceFlag)))
+ case StringSliceFlag:
+ return withEnvHint(fv.FieldByName("EnvVar").String(),
+ stringifyStringSliceFlag(f.(StringSliceFlag)))
+ }
+
+ placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String())
+
+ needsPlaceholder := false
+ defaultValueString := ""
+
+ if val := fv.FieldByName("Value"); val.IsValid() {
+ needsPlaceholder = true
+ defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface())
+
+ if val.Kind() == reflect.String && val.String() != "" {
+ defaultValueString = fmt.Sprintf(" (default: %q)", val.String())
+ }
+ }
+
+ if defaultValueString == " (default: )" {
+ defaultValueString = ""
+ }
+
+ if needsPlaceholder && placeholder == "" {
+ placeholder = defaultPlaceholder
+ }
+
+ usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString))
+
+ return withEnvHint(fv.FieldByName("EnvVar").String(),
+ fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault))
+}
+
+func stringifyIntSliceFlag(f IntSliceFlag) string {
+ defaultVals := []string{}
+ if f.Value != nil && len(f.Value.Value()) > 0 {
+ for _, i := range f.Value.Value() {
+ defaultVals = append(defaultVals, fmt.Sprintf("%d", i))
+ }
+ }
+
+ return stringifySliceFlag(f.Usage, f.Name, defaultVals)
+}
+
+func stringifyInt64SliceFlag(f Int64SliceFlag) string {
+ defaultVals := []string{}
+ if f.Value != nil && len(f.Value.Value()) > 0 {
+ for _, i := range f.Value.Value() {
+ defaultVals = append(defaultVals, fmt.Sprintf("%d", i))
+ }
+ }
+
+ return stringifySliceFlag(f.Usage, f.Name, defaultVals)
+}
+
+func stringifyStringSliceFlag(f StringSliceFlag) string {
+ defaultVals := []string{}
+ if f.Value != nil && len(f.Value.Value()) > 0 {
+ for _, s := range f.Value.Value() {
+ if len(s) > 0 {
+ defaultVals = append(defaultVals, fmt.Sprintf("%q", s))
+ }
+ }
+ }
+
+ return stringifySliceFlag(f.Usage, f.Name, defaultVals)
+}
+
+func stringifySliceFlag(usage, name string, defaultVals []string) string {
+ placeholder, usage := unquoteUsage(usage)
+ if placeholder == "" {
+ placeholder = defaultPlaceholder
+ }
+
+ defaultVal := ""
+ if len(defaultVals) > 0 {
+ defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", "))
+ }
+
+ usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal))
+ return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault)
+}
diff --git a/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/urfave/cli/flag_generated.go
new file mode 100644
index 000000000..491b61956
--- /dev/null
+++ b/vendor/github.com/urfave/cli/flag_generated.go
@@ -0,0 +1,627 @@
+package cli
+
+import (
+ "flag"
+ "strconv"
+ "time"
+)
+
+// WARNING: This file is generated!
+
+// BoolFlag is a flag with type bool
+type BoolFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Destination *bool
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f BoolFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f BoolFlag) GetName() string {
+ return f.Name
+}
+
+// Bool looks up the value of a local BoolFlag, returns
+// false if not found
+func (c *Context) Bool(name string) bool {
+ return lookupBool(name, c.flagSet)
+}
+
+// GlobalBool looks up the value of a global BoolFlag, returns
+// false if not found
+func (c *Context) GlobalBool(name string) bool {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupBool(name, fs)
+ }
+ return false
+}
+
+func lookupBool(name string, set *flag.FlagSet) bool {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseBool(f.Value.String())
+ if err != nil {
+ return false
+ }
+ return parsed
+ }
+ return false
+}
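+
+// As a usage sketch from a caller's perspective (assuming the App type and
+// NewApp declared in app.go; error handling elided):
+//
+//	app := cli.NewApp()
+//	app.Flags = []cli.Flag{
+//		cli.BoolFlag{Name: "verbose, V", Usage: "enable verbose output"},
+//	}
+//	app.Action = func(c *cli.Context) error {
+//		if c.Bool("verbose") {
+//			fmt.Println("verbose mode enabled")
+//		}
+//		return nil
+//	}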
+
+// BoolTFlag is a flag with type bool that is true by default
+type BoolTFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Destination *bool
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f BoolTFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f BoolTFlag) GetName() string {
+ return f.Name
+}
+
+// BoolT looks up the value of a local BoolTFlag, returns
+// false if not found
+func (c *Context) BoolT(name string) bool {
+ return lookupBoolT(name, c.flagSet)
+}
+
+// GlobalBoolT looks up the value of a global BoolTFlag, returns
+// false if not found
+func (c *Context) GlobalBoolT(name string) bool {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupBoolT(name, fs)
+ }
+ return false
+}
+
+func lookupBoolT(name string, set *flag.FlagSet) bool {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseBool(f.Value.String())
+ if err != nil {
+ return false
+ }
+ return parsed
+ }
+ return false
+}
+
+// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration)
+type DurationFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value time.Duration
+ Destination *time.Duration
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f DurationFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f DurationFlag) GetName() string {
+ return f.Name
+}
+
+// Duration looks up the value of a local DurationFlag, returns
+// 0 if not found
+func (c *Context) Duration(name string) time.Duration {
+ return lookupDuration(name, c.flagSet)
+}
+
+// GlobalDuration looks up the value of a global DurationFlag, returns
+// 0 if not found
+func (c *Context) GlobalDuration(name string) time.Duration {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupDuration(name, fs)
+ }
+ return 0
+}
+
+func lookupDuration(name string, set *flag.FlagSet) time.Duration {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := time.ParseDuration(f.Value.String())
+ if err != nil {
+ return 0
+ }
+ return parsed
+ }
+ return 0
+}
+
+// Float64Flag is a flag with type float64
+type Float64Flag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value float64
+ Destination *float64
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Float64Flag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Float64Flag) GetName() string {
+ return f.Name
+}
+
+// Float64 looks up the value of a local Float64Flag, returns
+// 0 if not found
+func (c *Context) Float64(name string) float64 {
+ return lookupFloat64(name, c.flagSet)
+}
+
+// GlobalFloat64 looks up the value of a global Float64Flag, returns
+// 0 if not found
+func (c *Context) GlobalFloat64(name string) float64 {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupFloat64(name, fs)
+ }
+ return 0
+}
+
+func lookupFloat64(name string, set *flag.FlagSet) float64 {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseFloat(f.Value.String(), 64)
+ if err != nil {
+ return 0
+ }
+ return parsed
+ }
+ return 0
+}
+
+// GenericFlag is a flag with type Generic
+type GenericFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value Generic
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f GenericFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f GenericFlag) GetName() string {
+ return f.Name
+}
+
+// Generic looks up the value of a local GenericFlag, returns
+// nil if not found
+func (c *Context) Generic(name string) interface{} {
+ return lookupGeneric(name, c.flagSet)
+}
+
+// GlobalGeneric looks up the value of a global GenericFlag, returns
+// nil if not found
+func (c *Context) GlobalGeneric(name string) interface{} {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupGeneric(name, fs)
+ }
+ return nil
+}
+
+func lookupGeneric(name string, set *flag.FlagSet) interface{} {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := f.Value, error(nil)
+ if err != nil {
+ return nil
+ }
+ return parsed
+ }
+ return nil
+}
+
+// Int64Flag is a flag with type int64
+type Int64Flag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value int64
+ Destination *int64
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Int64Flag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Int64Flag) GetName() string {
+ return f.Name
+}
+
+// Int64 looks up the value of a local Int64Flag, returns
+// 0 if not found
+func (c *Context) Int64(name string) int64 {
+ return lookupInt64(name, c.flagSet)
+}
+
+// GlobalInt64 looks up the value of a global Int64Flag, returns
+// 0 if not found
+func (c *Context) GlobalInt64(name string) int64 {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupInt64(name, fs)
+ }
+ return 0
+}
+
+func lookupInt64(name string, set *flag.FlagSet) int64 {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
+ if err != nil {
+ return 0
+ }
+ return parsed
+ }
+ return 0
+}
+
+// IntFlag is a flag with type int
+type IntFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value int
+ Destination *int
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f IntFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f IntFlag) GetName() string {
+ return f.Name
+}
+
+// Int looks up the value of a local IntFlag, returns
+// 0 if not found
+func (c *Context) Int(name string) int {
+ return lookupInt(name, c.flagSet)
+}
+
+// GlobalInt looks up the value of a global IntFlag, returns
+// 0 if not found
+func (c *Context) GlobalInt(name string) int {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupInt(name, fs)
+ }
+ return 0
+}
+
+func lookupInt(name string, set *flag.FlagSet) int {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
+ if err != nil {
+ return 0
+ }
+ return int(parsed)
+ }
+ return 0
+}
+
+// IntSliceFlag is a flag with type *IntSlice
+type IntSliceFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value *IntSlice
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f IntSliceFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f IntSliceFlag) GetName() string {
+ return f.Name
+}
+
+// IntSlice looks up the value of a local IntSliceFlag, returns
+// nil if not found
+func (c *Context) IntSlice(name string) []int {
+ return lookupIntSlice(name, c.flagSet)
+}
+
+// GlobalIntSlice looks up the value of a global IntSliceFlag, returns
+// nil if not found
+func (c *Context) GlobalIntSlice(name string) []int {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupIntSlice(name, fs)
+ }
+ return nil
+}
+
+func lookupIntSlice(name string, set *flag.FlagSet) []int {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := (f.Value.(*IntSlice)).Value(), error(nil)
+ if err != nil {
+ return nil
+ }
+ return parsed
+ }
+ return nil
+}
+
+// Int64SliceFlag is a flag with type *Int64Slice
+type Int64SliceFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value *Int64Slice
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Int64SliceFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Int64SliceFlag) GetName() string {
+ return f.Name
+}
+
+// Int64Slice looks up the value of a local Int64SliceFlag, returns
+// nil if not found
+func (c *Context) Int64Slice(name string) []int64 {
+ return lookupInt64Slice(name, c.flagSet)
+}
+
+// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns
+// nil if not found
+func (c *Context) GlobalInt64Slice(name string) []int64 {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupInt64Slice(name, fs)
+ }
+ return nil
+}
+
+func lookupInt64Slice(name string, set *flag.FlagSet) []int64 {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil)
+ if err != nil {
+ return nil
+ }
+ return parsed
+ }
+ return nil
+}
+
+// StringFlag is a flag with type string
+type StringFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value string
+ Destination *string
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f StringFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f StringFlag) GetName() string {
+ return f.Name
+}
+
+// String looks up the value of a local StringFlag, returns
+// "" if not found
+func (c *Context) String(name string) string {
+ return lookupString(name, c.flagSet)
+}
+
+// GlobalString looks up the value of a global StringFlag, returns
+// "" if not found
+func (c *Context) GlobalString(name string) string {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupString(name, fs)
+ }
+ return ""
+}
+
+func lookupString(name string, set *flag.FlagSet) string {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := f.Value.String(), error(nil)
+ if err != nil {
+ return ""
+ }
+ return parsed
+ }
+ return ""
+}
+
+// StringSliceFlag is a flag with type *StringSlice
+type StringSliceFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value *StringSlice
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f StringSliceFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f StringSliceFlag) GetName() string {
+ return f.Name
+}
+
+// StringSlice looks up the value of a local StringSliceFlag, returns
+// nil if not found
+func (c *Context) StringSlice(name string) []string {
+ return lookupStringSlice(name, c.flagSet)
+}
+
+// GlobalStringSlice looks up the value of a global StringSliceFlag, returns
+// nil if not found
+func (c *Context) GlobalStringSlice(name string) []string {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupStringSlice(name, fs)
+ }
+ return nil
+}
+
+func lookupStringSlice(name string, set *flag.FlagSet) []string {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := (f.Value.(*StringSlice)).Value(), error(nil)
+ if err != nil {
+ return nil
+ }
+ return parsed
+ }
+ return nil
+}
+
+// Uint64Flag is a flag with type uint64
+type Uint64Flag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value uint64
+ Destination *uint64
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Uint64Flag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Uint64Flag) GetName() string {
+ return f.Name
+}
+
+// Uint64 looks up the value of a local Uint64Flag, returns
+// 0 if not found
+func (c *Context) Uint64(name string) uint64 {
+ return lookupUint64(name, c.flagSet)
+}
+
+// GlobalUint64 looks up the value of a global Uint64Flag, returns
+// 0 if not found
+func (c *Context) GlobalUint64(name string) uint64 {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupUint64(name, fs)
+ }
+ return 0
+}
+
+func lookupUint64(name string, set *flag.FlagSet) uint64 {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
+ if err != nil {
+ return 0
+ }
+ return parsed
+ }
+ return 0
+}
+
+// UintFlag is a flag with type uint
+type UintFlag struct {
+ Name string
+ Usage string
+ EnvVar string
+ Hidden bool
+ Value uint
+ Destination *uint
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f UintFlag) String() string {
+ return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f UintFlag) GetName() string {
+ return f.Name
+}
+
+// Uint looks up the value of a local UintFlag, returns
+// 0 if not found
+func (c *Context) Uint(name string) uint {
+ return lookupUint(name, c.flagSet)
+}
+
+// GlobalUint looks up the value of a global UintFlag, returns
+// 0 if not found
+func (c *Context) GlobalUint(name string) uint {
+ if fs := lookupGlobalFlagSet(name, c); fs != nil {
+ return lookupUint(name, fs)
+ }
+ return 0
+}
+
+func lookupUint(name string, set *flag.FlagSet) uint {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
+ if err != nil {
+ return 0
+ }
+ return uint(parsed)
+ }
+ return 0
+}
diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go
new file mode 100644
index 000000000..cba5e6cb0
--- /dev/null
+++ b/vendor/github.com/urfave/cli/funcs.go
@@ -0,0 +1,28 @@
+package cli
+
+// BashCompleteFunc is an action to execute when the bash-completion flag is set
+type BashCompleteFunc func(*Context)
+
+// BeforeFunc is an action to execute before any subcommands are run, but
+// after the context is ready. If a non-nil error is returned, no subcommands
+// are run.
+type BeforeFunc func(*Context) error
+
+// AfterFunc is an action to execute after any subcommands are run and after
+// the subcommand has finished. It is run even if Action() panics.
+type AfterFunc func(*Context) error
+
+// ActionFunc is the action to execute when no subcommands are specified
+type ActionFunc func(*Context) error
+
+// CommandNotFoundFunc is executed if the proper command cannot be found
+type CommandNotFoundFunc func(*Context, string)
+
+// OnUsageErrorFunc is executed if a usage error occurs. This is useful for
+// displaying customized usage error messages. This function is able to replace
+// the original error messages. If this function is not set, the "Incorrect
+// usage" message is displayed and execution is interrupted.
+type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error
+
+// FlagStringFunc is used by the help generation to display a flag, which is
+// expected to be a single line.
+type FlagStringFunc func(Flag) string
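+
+// As a sketch, help output for flags can be customized by assigning any
+// FlagStringFunc to the package-level FlagStringer variable (declared in
+// flag.go), e.g.:
+//
+//	cli.FlagStringer = func(f cli.Flag) string {
+//		return "\t" + f.GetName()
+//	}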
diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go
new file mode 100644
index 000000000..57ec98d58
--- /dev/null
+++ b/vendor/github.com/urfave/cli/help.go
@@ -0,0 +1,338 @@
+package cli
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "text/tabwriter"
+ "text/template"
+)
+
+// AppHelpTemplate is the text template for the Default help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var AppHelpTemplate = `NAME:
+ {{.Name}}{{if .Usage}} - {{.Usage}}{{end}}
+
+USAGE:
+ {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+
+VERSION:
+ {{.Version}}{{end}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+ {{.Description}}{{end}}{{if len .Authors}}
+
+AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
+ {{range $index, $author := .Authors}}{{if $index}}
+ {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
+
+COMMANDS:{{range .VisibleCategories}}{{if .Name}}
+ {{.Name}}:{{end}}{{range .VisibleCommands}}
+ {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
+
+GLOBAL OPTIONS:
+ {{range $index, $option := .VisibleFlags}}{{if $index}}
+ {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}}
+
+COPYRIGHT:
+ {{.Copyright}}{{end}}
+`
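+
+// As a sketch, callers can extend the template rather than replace it, e.g.:
+//
+//	cli.AppHelpTemplate = cli.AppHelpTemplate + `
+//	SUPPORT: https://example.com/support (hypothetical URL)
+//	`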
+
+// CommandHelpTemplate is the text template for the command help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var CommandHelpTemplate = `NAME:
+ {{.HelpName}} - {{.Usage}}
+
+USAGE:
+ {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+ {{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+ {{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+ {{range .VisibleFlags}}{{.}}
+ {{end}}{{end}}
+`
+
+// SubcommandHelpTemplate is the text template for the subcommand help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var SubcommandHelpTemplate = `NAME:
+ {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}}
+
+USAGE:
+ {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
+
+COMMANDS:{{range .VisibleCategories}}{{if .Name}}
+ {{.Name}}:{{end}}{{range .VisibleCommands}}
+ {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}
+{{end}}{{if .VisibleFlags}}
+OPTIONS:
+ {{range .VisibleFlags}}{{.}}
+ {{end}}{{end}}
+`
+
+var helpCommand = Command{
+ Name: "help",
+ Aliases: []string{"h"},
+ Usage: "Shows a list of commands or help for one command",
+ ArgsUsage: "[command]",
+ Action: func(c *Context) error {
+ args := c.Args()
+ if args.Present() {
+ return ShowCommandHelp(c, args.First())
+ }
+
+ ShowAppHelp(c)
+ return nil
+ },
+}
+
+var helpSubcommand = Command{
+ Name: "help",
+ Aliases: []string{"h"},
+ Usage: "Shows a list of commands or help for one command",
+ ArgsUsage: "[command]",
+ Action: func(c *Context) error {
+ args := c.Args()
+ if args.Present() {
+ return ShowCommandHelp(c, args.First())
+ }
+
+ return ShowSubcommandHelp(c)
+ },
+}
+
+// Prints help for the App or Command
+type helpPrinter func(w io.Writer, templ string, data interface{})
+
+// Prints help for the App or Command with custom template function.
+type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{})
+
+// HelpPrinter is a function that writes the help output. If not set, a
+// default is used. The function signature is:
+// func(w io.Writer, templ string, data interface{})
+var HelpPrinter helpPrinter = printHelp
+
+// HelpPrinterCustom is the same as HelpPrinter but
+// takes a custom map of template functions.
+var HelpPrinterCustom helpPrinterCustom = printHelpCustom
+
+// VersionPrinter prints the version for the App
+var VersionPrinter = printVersion
+
+// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with the given exit code.
+func ShowAppHelpAndExit(c *Context, exitCode int) {
+ ShowAppHelp(c)
+ os.Exit(exitCode)
+}
+
+// ShowAppHelp is an action that displays the help.
+func ShowAppHelp(c *Context) (err error) {
+ if c.App.CustomAppHelpTemplate == "" {
+ HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
+ return
+ }
+ customAppData := func() map[string]interface{} {
+ if c.App.ExtraInfo == nil {
+ return nil
+ }
+ return map[string]interface{}{
+ "ExtraInfo": c.App.ExtraInfo,
+ }
+ }
+ HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData())
+ return nil
+}
+
+// DefaultAppComplete prints the list of subcommands as the default app completion method
+func DefaultAppComplete(c *Context) {
+ for _, command := range c.App.Commands {
+ if command.Hidden {
+ continue
+ }
+ for _, name := range command.Names() {
+ fmt.Fprintln(c.App.Writer, name)
+ }
+ }
+}
+
+// ShowCommandHelpAndExit - exits with the given code after showing help
+func ShowCommandHelpAndExit(c *Context, command string, code int) {
+ ShowCommandHelp(c, command)
+ os.Exit(code)
+}
+
+// ShowCommandHelp prints help for the given command
+func ShowCommandHelp(ctx *Context, command string) error {
+ // show the subcommand help for a command with subcommands
+ if command == "" {
+ HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
+ return nil
+ }
+
+ for _, c := range ctx.App.Commands {
+ if c.HasName(command) {
+ if c.CustomHelpTemplate != "" {
+ HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil)
+ } else {
+ HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
+ }
+ return nil
+ }
+ }
+
+ if ctx.App.CommandNotFound == nil {
+ return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
+ }
+
+ ctx.App.CommandNotFound(ctx, command)
+ return nil
+}
+
+// ShowSubcommandHelp prints help for the given subcommand
+func ShowSubcommandHelp(c *Context) error {
+ return ShowCommandHelp(c, c.Command.Name)
+}
+
+// ShowVersion prints the version number of the App
+func ShowVersion(c *Context) {
+ VersionPrinter(c)
+}
+
+func printVersion(c *Context) {
+ fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
+}
+
+// ShowCompletions prints the lists of commands within a given context
+func ShowCompletions(c *Context) {
+ a := c.App
+ if a != nil && a.BashComplete != nil {
+ a.BashComplete(c)
+ }
+}
+
+// ShowCommandCompletions prints the custom completions for a given command
+func ShowCommandCompletions(ctx *Context, command string) {
+ c := ctx.App.Command(command)
+ if c != nil && c.BashComplete != nil {
+ c.BashComplete(ctx)
+ }
+}
+
+func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) {
+ funcMap := template.FuncMap{
+ "join": strings.Join,
+ }
+ if customFunc != nil {
+ for key, value := range customFunc {
+ funcMap[key] = value
+ }
+ }
+
+ w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
+ t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
+ err := t.Execute(w, data)
+ if err != nil {
+ // If the writer is closed, t.Execute will fail, and there's nothing
+ // we can do to recover.
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
+ fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
+ }
+ return
+ }
+ w.Flush()
+}
+
+func printHelp(out io.Writer, templ string, data interface{}) {
+ printHelpCustom(out, templ, data, nil)
+}
+
+func checkVersion(c *Context) bool {
+ found := false
+ if VersionFlag.GetName() != "" {
+ eachName(VersionFlag.GetName(), func(name string) {
+ if c.GlobalBool(name) || c.Bool(name) {
+ found = true
+ }
+ })
+ }
+ return found
+}
+
+func checkHelp(c *Context) bool {
+ found := false
+ if HelpFlag.GetName() != "" {
+ eachName(HelpFlag.GetName(), func(name string) {
+ if c.GlobalBool(name) || c.Bool(name) {
+ found = true
+ }
+ })
+ }
+ return found
+}
+
+func checkCommandHelp(c *Context, name string) bool {
+ if c.Bool("h") || c.Bool("help") {
+ ShowCommandHelp(c, name)
+ return true
+ }
+
+ return false
+}
+
+func checkSubcommandHelp(c *Context) bool {
+ if c.Bool("h") || c.Bool("help") {
+ ShowSubcommandHelp(c)
+ return true
+ }
+
+ return false
+}
+
+func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
+ if !a.EnableBashCompletion {
+ return false, arguments
+ }
+
+ pos := len(arguments) - 1
+ lastArg := arguments[pos]
+
+ if lastArg != "--"+BashCompletionFlag.GetName() {
+ return false, arguments
+ }
+
+ return true, arguments[:pos]
+}
+
+func checkCompletions(c *Context) bool {
+ if !c.shellComplete {
+ return false
+ }
+
+ if args := c.Args(); args.Present() {
+ name := args.First()
+ if cmd := c.App.Command(name); cmd != nil {
+ // let the command handle the completion
+ return false
+ }
+ }
+
+ ShowCompletions(c)
+ return true
+}
+
+func checkCommandCompletions(c *Context, name string) bool {
+ if !c.shellComplete {
+ return false
+ }
+
+ ShowCommandCompletions(c, name)
+ return true
+}
diff --git a/vendor/github.com/vbatts/tar-split/LICENSE b/vendor/github.com/vbatts/tar-split/LICENSE
new file mode 100644
index 000000000..ca03685b1
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/vbatts/tar-split/README.md b/vendor/github.com/vbatts/tar-split/README.md
new file mode 100644
index 000000000..4c544d823
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/README.md
@@ -0,0 +1,137 @@
+# tar-split
+
+[![Build Status](https://travis-ci.org/vbatts/tar-split.svg?branch=master)](https://travis-ci.org/vbatts/tar-split)
+
+Pristinely disassembling a tar archive, and stashing the raw bytes and offsets needed to reassemble a validating original archive.
+
+## Docs
+
+Code API for libraries provided by `tar-split`:
+
+* https://godoc.org/github.com/vbatts/tar-split/tar/asm
+* https://godoc.org/github.com/vbatts/tar-split/tar/storage
+* https://godoc.org/github.com/vbatts/tar-split/archive/tar
+
+## Install
+
+The command line utility is installable via:
+
+```bash
+go get github.com/vbatts/tar-split/cmd/tar-split
+```
+
+## Usage
+
+For CLI usage, see its [README.md](cmd/tar-split/README.md).
+For the library, see the [docs](#docs).
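+
+A minimal library sketch (assuming the `asm` and `storage` APIs from the docs
+above; error handling is elided and the file names are illustrative):
+
+```go
+package main
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+func main() {
+	// Disassemble: record the tar metadata and stash file payloads in memory.
+	tarFile, _ := os.Open("tar-split.tar")
+	defer tarFile.Close()
+	meta, _ := os.Create("tar-data.json")
+	defer meta.Close()
+
+	fgp := storage.NewBufferFileGetPutter()
+	rdr, _ := asm.NewInputTarStream(tarFile, storage.NewJSONPacker(meta), fgp)
+	io.Copy(ioutil.Discard, rdr) // drain the stream so all entries are recorded
+
+	// Reassemble: combine the metadata and stored payloads back into a
+	// byte-identical tar stream.
+	metaIn, _ := os.Open("tar-data.json")
+	defer metaIn.Close()
+	out, _ := os.Create("reassembled.tar")
+	defer out.Close()
+	io.Copy(out, asm.NewOutputTarStream(fgp, storage.NewJSONUnpacker(metaIn)))
+}
+```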
+
+## Demo
+
+### Basic disassembly and assembly
+
+This demonstrates the `tar-split` command and how to assemble a tar archive from the `tar-data.json.gz` metadata.
+
+
+![basic cmd demo thumbnail](https://i.ytimg.com/vi/vh5wyjIOBtc/2.jpg?time=1445027151805)
+[youtube video of basic command demo](https://youtu.be/vh5wyjIOBtc)
+
+### Docker layer preservation
+
+This demonstrates the tar-split integration for docker-1.8, providing consistent tar archives for the image layer content.
+
+![docker tar-split demo](https://i.ytimg.com/vi_webp/vh5wyjIOBtc/default.webp)
+[youtube video of docker layer checksums](https://youtu.be/tV_Dia8E8xw)
+
+## Caveat
+
+Eventually this should detect TARs for which this is not possible.
+
+For example, stored sparse files that have "holes" in them will be read as a
+contiguous file, though the archive contents may be recorded in sparse format.
+Therefore, when adding the file payload to a reassembled tar, to achieve
+identical output, the file payload would need to be precisely re-sparsified.
+This is not something I seek to fix immediately; I would rather have an alert
+that precise reassembly is not possible.
+(See http://www.gnu.org/software/tar/manual/html_node/Sparse-Formats.html for more.)
+
+
+Another caveat: while tar archives support having multiple file entries for the
+same path, we will not support this feature. If there is more than one entry
+with the same path, expect an error (like `ErrDuplicatePath`) or a resulting tar
+stream that does not validate against your original checksum/signature.
+
+## Contract
+
+Do not break the API of stdlib `archive/tar` in our fork (ideally find an upstream mergeable solution).
+
+## Std Version
+
+The version of the golang stdlib `archive/tar` is from go1.6.
+It is minimally extended to expose the raw bytes of the TAR, rather than just the marshalled headers and file stream.
+
+
+## Design
+
+See the [design](concept/DESIGN.md).
+
+## Stored Metadata
+
+Since the raw bytes of the headers and padding are stored, you may be wondering
+what the size implications are. The headers are at least 512 bytes per
+file (sometimes more), there are at least 1024 null bytes at the end of the
+archive, and then there is various padding. This makes for constant, linear
+growth in the stored metadata with a naive storage implementation.
+
+First we'll get an archive to work with. For repeatability, we'll make an
+archive from what you've just cloned:
+
+```bash
+git archive --format=tar -o tar-split.tar HEAD .
+```
+
+```bash
+$ go get github.com/vbatts/tar-split/cmd/tar-split
+$ tar-split checksize ./tar-split.tar
+inspecting "tar-split.tar" (size 210k)
+ -- number of files: 50
+ -- size of metadata uncompressed: 53k
+ -- size of gzip compressed metadata: 3k
+```
+
+So, assuming you manage the extraction of the archive yourself and can reuse
+the file payloads from a relative path, the only additional storage
+implication is as little as 3kb.
+
+But let's look at a larger archive, with many files.
+
+```bash
+$ ls -sh ./d.tar
+1.4G ./d.tar
+$ tar-split checksize ~/d.tar
+inspecting "/home/vbatts/d.tar" (size 1420749k)
+ -- number of files: 38718
+ -- size of metadata uncompressed: 43261k
+ -- size of gzip compressed metadata: 2251k
+```
+
+Here, an archive with 38,718 files has a compressed footprint of about 2mb.
+
+Rolling in the null bytes at the end of the archive, we can assume a rough
+bytes-per-file rate for the storage implications.
+
+| uncompressed | compressed |
+| :----------: | :--------: |
+| ~1kb per file | ~0.06kb per file |
+
+
+## What's Next?
+
+* More implementations of storage Packer and Unpacker
+* More implementations of FileGetter and FilePutter
+* An assembler stream that implements `io.Seeker` would be interesting
+
+
+## License
+
+See [LICENSE](LICENSE)
+
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
new file mode 100644
index 000000000..36f4e2398
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
@@ -0,0 +1,340 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+// http://www.gnu.org/software/tar/manual/html_node/Standard.html
+// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "time"
+)
+
+const (
+ blockSize = 512
+
+ // Types
+ TypeReg = '0' // regular file
+ TypeRegA = '\x00' // regular file
+ TypeLink = '1' // hard link
+ TypeSymlink = '2' // symbolic link
+ TypeChar = '3' // character device node
+ TypeBlock = '4' // block device node
+ TypeDir = '5' // directory
+ TypeFifo = '6' // fifo node
+ TypeCont = '7' // reserved
+ TypeXHeader = 'x' // extended header
+ TypeXGlobalHeader = 'g' // global extended header
+ TypeGNULongName = 'L' // Next file has a long name
+ TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
+ TypeGNUSparse = 'S' // sparse file
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+ Name string // name of header file entry
+ Mode int64 // permission and mode bits
+ Uid int // user id of owner
+ Gid int // group id of owner
+ Size int64 // length in bytes
+ ModTime time.Time // modified time
+ Typeflag byte // type of header entry
+ Linkname string // target name of link
+ Uname string // user name of owner
+ Gname string // group name of owner
+ Devmajor int64 // major number of character or block device
+ Devminor int64 // minor number of character or block device
+ AccessTime time.Time // access time
+ ChangeTime time.Time // status change time
+ Xattrs map[string]string
+}
+
+// File name constants from the tar spec.
+const (
+ fileNameSize = 100 // Maximum number of bytes in a standard tar name.
+ fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
+)
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+ h *Header
+}
+
+func (fi headerFileInfo) Size() int64 { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{} { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+ if fi.IsDir() {
+ return path.Base(path.Clean(fi.h.Name))
+ }
+ return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+ // Set file permission bits.
+ mode = os.FileMode(fi.h.Mode).Perm()
+
+ // Set setuid, setgid and sticky bits.
+ if fi.h.Mode&c_ISUID != 0 {
+ // setuid
+ mode |= os.ModeSetuid
+ }
+ if fi.h.Mode&c_ISGID != 0 {
+ // setgid
+ mode |= os.ModeSetgid
+ }
+ if fi.h.Mode&c_ISVTX != 0 {
+ // sticky
+ mode |= os.ModeSticky
+ }
+
+ // Set file mode bits.
+ // clear perm, setuid, setgid and sticky bits.
+ m := os.FileMode(fi.h.Mode) &^ 07777
+ if m == c_ISDIR {
+ // directory
+ mode |= os.ModeDir
+ }
+ if m == c_ISFIFO {
+ // named pipe (FIFO)
+ mode |= os.ModeNamedPipe
+ }
+ if m == c_ISLNK {
+ // symbolic link
+ mode |= os.ModeSymlink
+ }
+ if m == c_ISBLK {
+ // device file
+ mode |= os.ModeDevice
+ }
+ if m == c_ISCHR {
+ // Unix character device
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ }
+ if m == c_ISSOCK {
+ // Unix domain socket
+ mode |= os.ModeSocket
+ }
+
+ switch fi.h.Typeflag {
+ case TypeSymlink:
+ // symbolic link
+ mode |= os.ModeSymlink
+ case TypeChar:
+ // character device node
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ case TypeBlock:
+ // block device node
+ mode |= os.ModeDevice
+ case TypeDir:
+ // directory
+ mode |= os.ModeDir
+ case TypeFifo:
+ // fifo node
+ mode |= os.ModeNamedPipe
+ }
+
+ return mode
+}
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+// Mode constants from the tar spec.
+const (
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+ c_ISDIR = 040000 // Directory
+ c_ISFIFO = 010000 // FIFO
+ c_ISREG = 0100000 // Regular file
+ c_ISLNK = 0120000 // Symbolic link
+ c_ISBLK = 060000 // Block special file
+ c_ISCHR = 020000 // Character special file
+ c_ISSOCK = 0140000 // Socket
+)
+
+// Keywords for the PAX Extended Header
+const (
+ paxAtime = "atime"
+ paxCharset = "charset"
+ paxComment = "comment"
+ paxCtime = "ctime" // please note that ctime is not a valid pax header.
+ paxGid = "gid"
+ paxGname = "gname"
+ paxLinkpath = "linkpath"
+ paxMtime = "mtime"
+ paxPath = "path"
+ paxSize = "size"
+ paxUid = "uid"
+ paxUname = "uname"
+ paxXattr = "SCHILY.xattr."
+ paxNone = ""
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+ if fi == nil {
+ return nil, errors.New("tar: FileInfo is nil")
+ }
+ fm := fi.Mode()
+ h := &Header{
+ Name: fi.Name(),
+ ModTime: fi.ModTime(),
+ Mode: int64(fm.Perm()), // or'd with c_IS* constants later
+ }
+ switch {
+ case fm.IsRegular():
+ h.Mode |= c_ISREG
+ h.Typeflag = TypeReg
+ h.Size = fi.Size()
+ case fi.IsDir():
+ h.Typeflag = TypeDir
+ h.Mode |= c_ISDIR
+ h.Name += "/"
+ case fm&os.ModeSymlink != 0:
+ h.Typeflag = TypeSymlink
+ h.Mode |= c_ISLNK
+ h.Linkname = link
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ h.Mode |= c_ISCHR
+ h.Typeflag = TypeChar
+ } else {
+ h.Mode |= c_ISBLK
+ h.Typeflag = TypeBlock
+ }
+ case fm&os.ModeNamedPipe != 0:
+ h.Typeflag = TypeFifo
+ h.Mode |= c_ISFIFO
+ case fm&os.ModeSocket != 0:
+ h.Mode |= c_ISSOCK
+ default:
+ return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+ }
+ if fm&os.ModeSetuid != 0 {
+ h.Mode |= c_ISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ h.Mode |= c_ISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ h.Mode |= c_ISVTX
+ }
+ // If possible, populate additional fields from OS-specific
+ // FileInfo fields.
+ if sys, ok := fi.Sys().(*Header); ok {
+ // This FileInfo came from a Header (not the OS). Use the
+ // original Header to populate all remaining fields.
+ h.Uid = sys.Uid
+ h.Gid = sys.Gid
+ h.Uname = sys.Uname
+ h.Gname = sys.Gname
+ h.AccessTime = sys.AccessTime
+ h.ChangeTime = sys.ChangeTime
+ if sys.Xattrs != nil {
+ h.Xattrs = make(map[string]string)
+ for k, v := range sys.Xattrs {
+ h.Xattrs[k] = v
+ }
+ }
+ if sys.Typeflag == TypeLink {
+ // hard link
+ h.Typeflag = TypeLink
+ h.Size = 0
+ h.Linkname = sys.Linkname
+ }
+ }
+ if sysStat != nil {
+ return h, sysStat(fi, h)
+ }
+ return h, nil
+}
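+
+// As a usage sketch, callers typically fix up Name themselves, since
+// os.FileInfo carries only the base name (error handling elided):
+//
+//	fi, _ := os.Stat("/etc/hosts")
+//	hdr, _ := FileInfoHeader(fi, "")
+//	hdr.Name = "etc/hosts" // restore the full (relative) path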
+
+var zeroBlock = make([]byte, blockSize)
+
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
+// We compute and return both.
+func checksum(header []byte) (unsigned int64, signed int64) {
+ for i := 0; i < len(header); i++ {
+ if i == 148 {
+ // The chksum field (header[148:156]) is special: it should be treated as space bytes.
+ unsigned += ' ' * 8
+ signed += ' ' * 8
+ i += 7
+ continue
+ }
+ unsigned += int64(header[i])
+ signed += int64(int8(header[i]))
+ }
+ return
+}
+
+type slicer []byte
+
+func (sp *slicer) next(n int) (b []byte) {
+ s := *sp
+ b, *sp = s[0:n], s[n:]
+ return
+}
+
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c >= 0x80 {
+ return false
+ }
+ }
+ return true
+}
+
+func toASCII(s string) string {
+ if isASCII(s) {
+ return s
+ }
+ var buf bytes.Buffer
+ for _, c := range s {
+ if c < 0x80 {
+ buf.WriteByte(byte(c))
+ }
+ }
+ return buf.String()
+}
+
+// isHeaderOnlyType checks if the given type flag is of the type that has no
+// data section even if a size is specified.
+func isHeaderOnlyType(flag byte) bool {
+ switch flag {
+ case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
new file mode 100644
index 000000000..adf32122e
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -0,0 +1,1064 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - pax extensions
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrHeader = errors.New("archive/tar: invalid tar header")
+)
+
+const maxNanoSecondIntSize = 9
+
+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+ r io.Reader
+ err error
+ pad int64 // amount of padding (ignored) after current file entry
+ curr numBytesReader // reader for current file entry
+ hdrBuff [blockSize]byte // buffer to use in readHeader
+
+	RawAccounting bool // Whether to enable the accounting needed to reassemble the tar from raw bytes; incurs some performance/memory overhead.
+ rawBytes *bytes.Buffer // last raw bits
+}
+
+type parser struct {
+ err error // Last error seen
+}
+
+// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
+// This includes the header and padding.
+//
+// This call resets the current rawBytes buffer.
+//
+// Only available when RawAccounting is enabled; otherwise this returns nil.
+func (tr *Reader) RawBytes() []byte {
+ if !tr.RawAccounting {
+ return nil
+ }
+ if tr.rawBytes == nil {
+ tr.rawBytes = bytes.NewBuffer(nil)
+ }
+ // if we've read them, then flush them.
+ defer tr.rawBytes.Reset()
+ return tr.rawBytes.Bytes()
+}
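+
+// A minimal sketch of reading with raw accounting enabled (error handling
+// elided):
+//
+//	tr := NewReader(r)
+//	tr.RawAccounting = true
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		raw := tr.RawBytes() // header and padding bytes for this entry
+//		_, _ = hdr, raw
+//	}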
+
+// A numBytesReader is an io.Reader with a numBytes method, returning the number
+// of bytes remaining in the underlying encoded data.
+type numBytesReader interface {
+ io.Reader
+ numBytes() int64
+}
+
+// A regFileReader is a numBytesReader for reading file data from a tar archive.
+type regFileReader struct {
+ r io.Reader // underlying reader
+ nb int64 // number of unread bytes for current file entry
+}
+
+// A sparseFileReader is a numBytesReader for reading sparse file data from a
+// tar archive.
+type sparseFileReader struct {
+ rfr numBytesReader // Reads the sparse-encoded file data
+ sp []sparseEntry // The sparse map for the file
+ pos int64 // Keeps track of file position
+ total int64 // Total size of the file
+}
+
+// A sparseEntry holds a single entry in a sparse file's sparse map.
+//
+// Sparse files are represented using a series of sparseEntrys.
+// Despite the name, a sparseEntry represents an actual data fragment that
+// references data found in the underlying archive stream. All regions not
+// covered by a sparseEntry are logically filled with zeros.
+//
+// For example, if the underlying raw file contains the 8-byte data:
+// var compactData = "abcdefgh"
+//
+// And the sparse map has the following entries:
+// var sp = []sparseEntry{
+// {offset: 2, numBytes: 5} // Data fragment for [2..7]
+// {offset: 18, numBytes: 3} // Data fragment for [18..21]
+// }
+//
+// Then the content of the resulting sparse file with a "real" size of 25 is:
+// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type sparseEntry struct {
+ offset int64 // Starting position of the fragment
+ numBytes int64 // Length of the fragment
+}
+
+// Keywords for GNU sparse files in a PAX extended header
+const (
+ paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+ paxGNUSparseOffset = "GNU.sparse.offset"
+ paxGNUSparseNumBytes = "GNU.sparse.numbytes"
+ paxGNUSparseMap = "GNU.sparse.map"
+ paxGNUSparseName = "GNU.sparse.name"
+ paxGNUSparseMajor = "GNU.sparse.major"
+ paxGNUSparseMinor = "GNU.sparse.minor"
+ paxGNUSparseSize = "GNU.sparse.size"
+ paxGNUSparseRealSize = "GNU.sparse.realsize"
+)
+
+// Keywords for old GNU sparse headers
+const (
+ oldGNUSparseMainHeaderOffset = 386
+ oldGNUSparseMainHeaderIsExtendedOffset = 482
+ oldGNUSparseMainHeaderNumEntries = 4
+ oldGNUSparseExtendedHeaderIsExtendedOffset = 504
+ oldGNUSparseExtendedHeaderNumEntries = 21
+ oldGNUSparseOffsetSize = 12
+ oldGNUSparseNumBytesSize = 12
+)
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+ if tr.RawAccounting {
+ if tr.rawBytes == nil {
+ tr.rawBytes = bytes.NewBuffer(nil)
+ } else {
+ tr.rawBytes.Reset()
+ }
+ }
+
+ if tr.err != nil {
+ return nil, tr.err
+ }
+
+ var hdr *Header
+ var extHdrs map[string]string
+
+ // Externally, Next iterates through the tar archive as if it is a series of
+ // files. Internally, the tar format often uses fake "files" to add meta
+ // data that describes the next file. These meta data "files" should not
+ // normally be visible to the outside. As such, this loop iterates through
+ // one or more "header files" until it finds a "normal file".
+loop:
+ for {
+ tr.err = tr.skipUnread()
+ if tr.err != nil {
+ return nil, tr.err
+ }
+
+ hdr = tr.readHeader()
+ if tr.err != nil {
+ return nil, tr.err
+ }
+ // Check for PAX/GNU special headers and files.
+ switch hdr.Typeflag {
+ case TypeXHeader:
+ extHdrs, tr.err = parsePAX(tr)
+ if tr.err != nil {
+ return nil, tr.err
+ }
+ continue loop // This is a meta header affecting the next header
+ case TypeGNULongName, TypeGNULongLink:
+ var realname []byte
+ realname, tr.err = ioutil.ReadAll(tr)
+ if tr.err != nil {
+ return nil, tr.err
+ }
+
+ if tr.RawAccounting {
+ if _, tr.err = tr.rawBytes.Write(realname); tr.err != nil {
+ return nil, tr.err
+ }
+ }
+
+ // Convert GNU extensions to use PAX headers.
+ if extHdrs == nil {
+ extHdrs = make(map[string]string)
+ }
+ var p parser
+ switch hdr.Typeflag {
+ case TypeGNULongName:
+ extHdrs[paxPath] = p.parseString(realname)
+ case TypeGNULongLink:
+ extHdrs[paxLinkpath] = p.parseString(realname)
+ }
+ if p.err != nil {
+ tr.err = p.err
+ return nil, tr.err
+ }
+ continue loop // This is a meta header affecting the next header
+ default:
+ mergePAX(hdr, extHdrs)
+
+ // Check for a PAX format sparse file
+ sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
+ if err != nil {
+ tr.err = err
+ return nil, err
+ }
+ if sp != nil {
+ // Current file is a PAX format GNU sparse file.
+ // Set the current file reader to a sparse file reader.
+ tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
+ if tr.err != nil {
+ return nil, tr.err
+ }
+ }
+ break loop // This is a file, so stop
+ }
+ }
+ return hdr, nil
+}
+
+// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
+// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
+// be treated as a regular file.
+func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
+ var sparseFormat string
+
+ // Check for sparse format indicators
+ major, majorOk := headers[paxGNUSparseMajor]
+ minor, minorOk := headers[paxGNUSparseMinor]
+ sparseName, sparseNameOk := headers[paxGNUSparseName]
+ _, sparseMapOk := headers[paxGNUSparseMap]
+ sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
+ sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
+
+ // Identify which, if any, sparse format applies from which PAX headers are set
+ if majorOk && minorOk {
+ sparseFormat = major + "." + minor
+ } else if sparseNameOk && sparseMapOk {
+ sparseFormat = "0.1"
+ } else if sparseSizeOk {
+ sparseFormat = "0.0"
+ } else {
+ // Not a PAX format GNU sparse file.
+ return nil, nil
+ }
+
+ // Check for unknown sparse format
+ if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
+ return nil, nil
+ }
+
+ // Update hdr from GNU sparse PAX headers
+ if sparseNameOk {
+ hdr.Name = sparseName
+ }
+ if sparseSizeOk {
+ realSize, err := strconv.ParseInt(sparseSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ } else if sparseRealSizeOk {
+ realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ }
+
+ // Set up the sparse map, according to the particular sparse format in use
+ var sp []sparseEntry
+ var err error
+ switch sparseFormat {
+ case "0.0", "0.1":
+ sp, err = readGNUSparseMap0x1(headers)
+ case "1.0":
+ sp, err = readGNUSparseMap1x0(tr.curr)
+ }
+ return sp, err
+}
+
+// mergePAX merges well known headers according to the PAX standard.
+// In general, PAX records with the same name as fields in the Header
+// struct overwrite those fields, since they carry higher precision or
+// longer values. Especially useful for the name and linkname fields.
+func mergePAX(hdr *Header, headers map[string]string) error {
+ for k, v := range headers {
+ switch k {
+ case paxPath:
+ hdr.Name = v
+ case paxLinkpath:
+ hdr.Linkname = v
+ case paxGname:
+ hdr.Gname = v
+ case paxUname:
+ hdr.Uname = v
+ case paxUid:
+ uid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = int(uid)
+ case paxGid:
+ gid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = int(gid)
+ case paxAtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.AccessTime = t
+ case paxMtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ModTime = t
+ case paxCtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ChangeTime = t
+ case paxSize:
+ size, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Size = int64(size)
+ default:
+ if strings.HasPrefix(k, paxXattr) {
+ if hdr.Xattrs == nil {
+ hdr.Xattrs = make(map[string]string)
+ }
+ hdr.Xattrs[k[len(paxXattr):]] = v
+ }
+ }
+ }
+ return nil
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in
+// the PAX specification.
+func parsePAXTime(t string) (time.Time, error) {
+ buf := []byte(t)
+ pos := bytes.IndexByte(buf, '.')
+ var seconds, nanoseconds int64
+ var err error
+ if pos == -1 {
+ seconds, err = strconv.ParseInt(t, 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ } else {
+ seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nano_buf := string(buf[pos+1:])
+ // Pad as needed before converting to a decimal.
+ // For example .030 -> .030000000 -> 30000000 nanoseconds
+ if len(nano_buf) < maxNanoSecondIntSize {
+ // Right pad
+ nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
+ } else if len(nano_buf) > maxNanoSecondIntSize {
+ // Right truncate
+ nano_buf = nano_buf[:maxNanoSecondIntSize]
+ }
+ nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ }
+ ts := time.Unix(seconds, nanoseconds)
+ return ts, nil
+}
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
+func parsePAX(r io.Reader) (map[string]string, error) {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ // leaving this function for io.Reader makes it more testable
+ if tr, ok := r.(*Reader); ok && tr.RawAccounting {
+ if _, err = tr.rawBytes.Write(buf); err != nil {
+ return nil, err
+ }
+ }
+ sbuf := string(buf)
+
+ // For GNU PAX sparse format 0.0 support.
+ // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
+ var sparseMap bytes.Buffer
+
+ headers := make(map[string]string)
+ // Each record is constructed as
+ // "%d %s=%s\n", length, keyword, value
+ for len(sbuf) > 0 {
+ key, value, residual, err := parsePAXRecord(sbuf)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sbuf = residual
+
+ keyStr := string(key)
+ if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
+ // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
+ sparseMap.WriteString(value)
+ sparseMap.Write([]byte{','})
+ } else {
+ // Normal key. Set the value in the headers map.
+ headers[keyStr] = string(value)
+ }
+ }
+ if sparseMap.Len() != 0 {
+ // Add sparse info to headers, chopping off the extra comma
+ sparseMap.Truncate(sparseMap.Len() - 1)
+ headers[paxGNUSparseMap] = sparseMap.String()
+ }
+ return headers, nil
+}
+
+// parsePAXRecord parses the input PAX record string into a key-value pair.
+// If parsing is successful, it will slice off the currently read record and
+// return the remainder as r.
+//
+// A PAX record is of the following form:
+// "%d %s=%s\n" % (size, key, value)
+func parsePAXRecord(s string) (k, v, r string, err error) {
+ // The size field ends at the first space.
+ sp := strings.IndexByte(s, ' ')
+ if sp == -1 {
+ return "", "", s, ErrHeader
+ }
+
+ // Parse the first token as a decimal integer.
+ n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
+ if perr != nil || n < 5 || int64(len(s)) < n {
+ return "", "", s, ErrHeader
+ }
+
+ // Extract everything between the space and the final newline.
+ rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
+ if nl != "\n" {
+ return "", "", s, ErrHeader
+ }
+
+ // The first equals separates the key from the value.
+ eq := strings.IndexByte(rec, '=')
+ if eq == -1 {
+ return "", "", s, ErrHeader
+ }
+ return rec[:eq], rec[eq+1:], rem, nil
+}
+
+// parseString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func (*parser) parseString(b []byte) string {
+ n := 0
+ for n < len(b) && b[n] != 0 {
+ n++
+ }
+ return string(b[0:n])
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+ // Check for base-256 (binary) format first.
+ // If the first bit is set, then all following bits constitute a two's
+ // complement encoded number in big-endian byte order.
+ if len(b) > 0 && b[0]&0x80 != 0 {
+ // Handling negative numbers relies on the following identity:
+ // -a-1 == ^a
+ //
+ // If the number is negative, we use an inversion mask to invert the
+ // data bytes and treat the value as an unsigned number.
+ var inv byte // 0x00 if positive or zero, 0xff if negative
+ if b[0]&0x40 != 0 {
+ inv = 0xff
+ }
+
+ var x uint64
+ for i, c := range b {
+ c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+ if i == 0 {
+ c &= 0x7f // Ignore signal bit in first byte
+ }
+ if (x >> 56) > 0 {
+ p.err = ErrHeader // Integer overflow
+ return 0
+ }
+ x = x<<8 | uint64(c)
+ }
+ if (x >> 63) > 0 {
+ p.err = ErrHeader // Integer overflow
+ return 0
+ }
+ if inv == 0xff {
+ return ^int64(x)
+ }
+ return int64(x)
+ }
+
+ // Normal case is base-8 (octal) format.
+ return p.parseOctal(b)
+}
+
+func (p *parser) parseOctal(b []byte) int64 {
+ // Because unused fields are filled with NULs, we need
+ // to skip leading NULs. Fields may also be padded with
+ // spaces or NULs.
+ // So we remove leading and trailing NULs and spaces to
+ // be sure.
+ b = bytes.Trim(b, " \x00")
+
+ if len(b) == 0 {
+ return 0
+ }
+ x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+ if perr != nil {
+ p.err = ErrHeader
+ }
+ return int64(x)
+}
+
+// skipUnread skips any unread bytes in the existing file entry, as well as any
+// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
+// encountered in the data portion; it is okay to hit io.EOF in the padding.
+//
+// Note that this function still works properly even when sparse files are being
+// used since numBytes returns the bytes remaining in the underlying io.Reader.
+func (tr *Reader) skipUnread() error {
+ dataSkip := tr.numBytes() // Number of data bytes to skip
+ totalSkip := dataSkip + tr.pad // Total number of bytes to skip
+ tr.curr, tr.pad = nil, 0
+ if tr.RawAccounting {
+ _, tr.err = io.CopyN(tr.rawBytes, tr.r, totalSkip)
+ return tr.err
+ }
+ // If possible, Seek to the last byte before the end of the data section.
+ // Do this because Seek is often lazy about reporting errors; this will mask
+ // the fact that the tar stream may be truncated. We can rely on the
+ // io.CopyN done shortly afterwards to trigger any IO errors.
+ var seekSkipped int64 // Number of bytes skipped via Seek
+ if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
+ // Not all io.Seeker can actually Seek. For example, os.Stdin implements
+ // io.Seeker, but calling Seek always returns an error and performs
+ // no action. Thus, we try an innocent seek to the current position
+ // to see if Seek is really supported.
+ pos1, err := sr.Seek(0, os.SEEK_CUR)
+ if err == nil {
+ // Seek seems supported, so perform the real Seek.
+ pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
+ if err != nil {
+ tr.err = err
+ return tr.err
+ }
+ seekSkipped = pos2 - pos1
+ }
+ }
+
+ var copySkipped int64 // Number of bytes skipped via CopyN
+ copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
+ if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
+ tr.err = io.ErrUnexpectedEOF
+ }
+ return tr.err
+}
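The "innocent seek" probe above generalizes; a hedged standalone version (assuming the io and os imports this file already uses):

```go
// seekable reports whether r supports real seeking. os.Stdin satisfies
// io.Seeker but fails at runtime, so probe with a no-op seek to the
// current position, exactly as skipUnread does.
func seekable(r io.Reader) bool {
	s, ok := r.(io.Seeker)
	if !ok {
		return false
	}
	_, err := s.Seek(0, os.SEEK_CUR)
	return err == nil
}
```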
+
+func (tr *Reader) verifyChecksum(header []byte) bool {
+ if tr.err != nil {
+ return false
+ }
+
+ var p parser
+ given := p.parseOctal(header[148:156])
+ unsigned, signed := checksum(header)
+ return p.err == nil && (given == unsigned || given == signed)
+}
+
+// readHeader reads the next block header and assumes that the underlying reader
+// is already aligned to a block boundary.
+//
+// The err will be set to io.EOF only when one of the following occurs:
+// * Exactly 0 bytes are read and EOF is hit.
+// * Exactly 1 block of zeros is read and EOF is hit.
+// * At least 2 blocks of zeros are read.
+func (tr *Reader) readHeader() *Header {
+ header := tr.hdrBuff[:]
+ copy(header, zeroBlock)
+
+ if n, err := io.ReadFull(tr.r, header); err != nil {
+ tr.err = err
+ // because it could read some of the block, but reach EOF first
+ if tr.err == io.EOF && tr.RawAccounting {
+ if _, err := tr.rawBytes.Write(header[:n]); err != nil {
+ tr.err = err
+ }
+ }
+ return nil // io.EOF is okay here
+ }
+ if tr.RawAccounting {
+ if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
+ return nil
+ }
+ }
+
+ // Two blocks of zero bytes marks the end of the archive.
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ if n, err := io.ReadFull(tr.r, header); err != nil {
+ tr.err = err
+ // because it could read some of the block, but reach EOF first
+ if tr.err == io.EOF && tr.RawAccounting {
+ if _, err := tr.rawBytes.Write(header[:n]); err != nil {
+ tr.err = err
+ }
+ }
+ return nil // io.EOF is okay here
+ }
+ if tr.RawAccounting {
+ if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
+ return nil
+ }
+ }
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ tr.err = io.EOF
+ } else {
+ tr.err = ErrHeader // zero block and then non-zero block
+ }
+ return nil
+ }
+
+ if !tr.verifyChecksum(header) {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Unpack
+ var p parser
+ hdr := new(Header)
+ s := slicer(header)
+
+ hdr.Name = p.parseString(s.next(100))
+ hdr.Mode = p.parseNumeric(s.next(8))
+ hdr.Uid = int(p.parseNumeric(s.next(8)))
+ hdr.Gid = int(p.parseNumeric(s.next(8)))
+ hdr.Size = p.parseNumeric(s.next(12))
+ hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+ s.next(8) // chksum
+ hdr.Typeflag = s.next(1)[0]
+ hdr.Linkname = p.parseString(s.next(100))
+
+ // The remainder of the header depends on the value of magic.
+ // The original (v7) version of tar had no explicit magic field,
+ // so its magic bytes, like the rest of the block, are NULs.
+ magic := string(s.next(8)) // contains version field as well.
+ var format string
+ switch {
+ case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
+ if string(header[508:512]) == "tar\x00" {
+ format = "star"
+ } else {
+ format = "posix"
+ }
+ case magic == "ustar \x00": // old GNU tar
+ format = "gnu"
+ }
+
+ switch format {
+ case "posix", "gnu", "star":
+ hdr.Uname = p.parseString(s.next(32))
+ hdr.Gname = p.parseString(s.next(32))
+ devmajor := s.next(8)
+ devminor := s.next(8)
+ if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
+ hdr.Devmajor = p.parseNumeric(devmajor)
+ hdr.Devminor = p.parseNumeric(devminor)
+ }
+ var prefix string
+ switch format {
+ case "posix", "gnu":
+ prefix = p.parseString(s.next(155))
+ case "star":
+ prefix = p.parseString(s.next(131))
+ hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+ hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+ }
+ if len(prefix) > 0 {
+ hdr.Name = prefix + "/" + hdr.Name
+ }
+ }
+
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+
+ nb := hdr.Size
+ if isHeaderOnlyType(hdr.Typeflag) {
+ nb = 0
+ }
+ if nb < 0 {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Set the current file reader.
+ tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+ tr.curr = &regFileReader{r: tr.r, nb: nb}
+
+ // Check for old GNU sparse format entry.
+ if hdr.Typeflag == TypeGNUSparse {
+ // Get the real size of the file.
+ hdr.Size = p.parseNumeric(header[483:495])
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+
+ // Read the sparse map.
+ sp := tr.readOldGNUSparseMap(header)
+ if tr.err != nil {
+ return nil
+ }
+
+ // Current file is a GNU sparse file. Update the current file reader.
+ tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
+ if tr.err != nil {
+ return nil
+ }
+ }
+
+ return hdr
+}
+
+// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
+// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
+// then one or more extension headers are used to store the rest of the sparse map.
+func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
+ var p parser
+ isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
+ spCap := oldGNUSparseMainHeaderNumEntries
+ if isExtended {
+ spCap += oldGNUSparseExtendedHeaderNumEntries
+ }
+ sp := make([]sparseEntry, 0, spCap)
+ s := slicer(header[oldGNUSparseMainHeaderOffset:])
+
+ // Read the four entries from the main tar header
+ for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
+ offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
+ numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ for isExtended {
+ // There are more entries. Read an extension header and parse its entries.
+ sparseHeader := make([]byte, blockSize)
+ if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
+ return nil
+ }
+ if tr.RawAccounting {
+ if _, tr.err = tr.rawBytes.Write(sparseHeader); tr.err != nil {
+ return nil
+ }
+ }
+
+ isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
+ s = slicer(sparseHeader)
+ for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
+ offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
+ numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
+ if p.err != nil {
+ tr.err = p.err
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ }
+ return sp
+}
+
+// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
+// version 1.0. The format of the sparse map consists of a series of
+// newline-terminated numeric fields. The first field is the number of entries
+// and is always present. Following this are the entries, consisting of two
+// fields (offset, numBytes). This function must stop reading at the end
+// boundary of the block containing the last newline.
+//
+// Note that the GNU manual says that numeric values should be encoded in octal
+// format. However, the GNU tar utility itself outputs these values in decimal.
+// As such, this library treats values as being encoded in decimal.
+func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
+ var cntNewline int64
+ var buf bytes.Buffer
+ var blk = make([]byte, blockSize)
+
+	// feedTokens copies data in blockSize chunks from r into buf until there are
+ // at least cnt newlines in buf. It will not read more blocks than needed.
+ var feedTokens = func(cnt int64) error {
+ for cntNewline < cnt {
+ if _, err := io.ReadFull(r, blk); err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ buf.Write(blk)
+ for _, c := range blk {
+ if c == '\n' {
+ cntNewline++
+ }
+ }
+ }
+ return nil
+ }
+
+ // nextToken gets the next token delimited by a newline. This assumes that
+ // at least one newline exists in the buffer.
+ var nextToken = func() string {
+ cntNewline--
+ tok, _ := buf.ReadString('\n')
+ return tok[:len(tok)-1] // Cut off newline
+ }
+
+ // Parse for the number of entries.
+ // Use integer overflow resistant math to check this.
+ if err := feedTokens(1); err != nil {
+ return nil, err
+ }
+ numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
+ if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+ return nil, ErrHeader
+ }
+
+ // Parse for all member entries.
+ // numEntries is trusted after this since a potential attacker must have
+ // committed resources proportional to what this library used.
+ if err := feedTokens(2 * numEntries); err != nil {
+ return nil, err
+ }
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ offset, err := strconv.ParseInt(nextToken(), 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ return sp, nil
+}
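For concreteness, a sketch of a version-1.0 map describing two fragments, padded to the full block the reader above expects:

```go
// Entry count first, then offset/numBytes pairs, one decimal per line.
blk := make([]byte, blockSize)
copy(blk, "2\n0\n1024\n4096\n512\n")
sp, err := readGNUSparseMap1x0(bytes.NewReader(blk))
// sp == []sparseEntry{{offset: 0, numBytes: 1024}, {offset: 4096, numBytes: 512}}
// err == nil
```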
+
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
+// version 0.1. The sparse map is stored in the PAX headers.
+func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
+ // Get number of entries.
+ // Use integer overflow resistant math to check this.
+ numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
+ numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
+ if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+ return nil, ErrHeader
+ }
+
+ // There should be two numbers in sparseMap for each entry.
+ sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
+ if int64(len(sparseMap)) != 2*numEntries {
+ return nil, ErrHeader
+ }
+
+ // Loop through the entries in the sparse map.
+ // numEntries is trusted now.
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ return sp, nil
+}
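The version-0.1 encoding of the same two fragments lives in PAX headers instead; the literal keys below assume the usual values of the paxGNUSparse* constants:

```go
extHdrs := map[string]string{
	"GNU.sparse.numblocks": "2",               // paxGNUSparseNumBlocks
	"GNU.sparse.map":       "0,1024,4096,512", // paxGNUSparseMap: offset,numBytes pairs
}
sp, err := readGNUSparseMap0x1(extHdrs)
// sp holds the same two entries as the 1.0 example above; err == nil
```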
+
+// numBytes returns the number of bytes left to read in the current file's entry
+// in the tar archive, or 0 if there is no current file.
+func (tr *Reader) numBytes() int64 {
+ if tr.curr == nil {
+ // No current file, so no bytes
+ return 0
+ }
+ return tr.curr.numBytes()
+}
+
+// Read reads from the current entry in the tar archive.
+// It returns 0, io.EOF when it reaches the end of that entry,
+// until Next is called to advance to the next entry.
+//
+// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
+// the Header.Size claims.
+func (tr *Reader) Read(b []byte) (n int, err error) {
+ if tr.err != nil {
+ return 0, tr.err
+ }
+ if tr.curr == nil {
+ return 0, io.EOF
+ }
+
+ n, err = tr.curr.Read(b)
+ if err != nil && err != io.EOF {
+ tr.err = err
+ }
+ return
+}
+
+func (rfr *regFileReader) Read(b []byte) (n int, err error) {
+ if rfr.nb == 0 {
+ // file consumed
+ return 0, io.EOF
+ }
+ if int64(len(b)) > rfr.nb {
+ b = b[0:rfr.nb]
+ }
+ n, err = rfr.r.Read(b)
+ rfr.nb -= int64(n)
+
+ if err == io.EOF && rfr.nb > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// numBytes returns the number of bytes left to read in the file's data in the tar archive.
+func (rfr *regFileReader) numBytes() int64 {
+ return rfr.nb
+}
+
+// newSparseFileReader creates a new sparseFileReader, but validates all of the
+// sparse entries before doing so.
+func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
+ if total < 0 {
+ return nil, ErrHeader // Total size cannot be negative
+ }
+
+ // Validate all sparse entries. These are the same checks as performed by
+ // the BSD tar utility.
+ for i, s := range sp {
+ switch {
+ case s.offset < 0 || s.numBytes < 0:
+ return nil, ErrHeader // Negative values are never okay
+ case s.offset > math.MaxInt64-s.numBytes:
+ return nil, ErrHeader // Integer overflow with large length
+ case s.offset+s.numBytes > total:
+ return nil, ErrHeader // Region extends beyond the "real" size
+ case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
+ return nil, ErrHeader // Regions can't overlap and must be in order
+ }
+ }
+ return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
+}
+
+// readHole reads a sparse hole ending at endOffset.
+func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
+ n64 := endOffset - sfr.pos
+ if n64 > int64(len(b)) {
+ n64 = int64(len(b))
+ }
+ n := int(n64)
+ for i := 0; i < n; i++ {
+ b[i] = 0
+ }
+ sfr.pos += n64
+ return n
+}
+
+// Read reads the sparse file data in expanded form.
+func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
+ // Skip past all empty fragments.
+ for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
+ sfr.sp = sfr.sp[1:]
+ }
+
+ // If there are no more fragments, then it is possible that there
+ // is one last sparse hole.
+ if len(sfr.sp) == 0 {
+ // This behavior matches the BSD tar utility.
+ // However, GNU tar stops returning data even if sfr.total is unmet.
+ if sfr.pos < sfr.total {
+ return sfr.readHole(b, sfr.total), nil
+ }
+ return 0, io.EOF
+ }
+
+ // In front of a data fragment, so read a hole.
+ if sfr.pos < sfr.sp[0].offset {
+ return sfr.readHole(b, sfr.sp[0].offset), nil
+ }
+
+ // In a data fragment, so read from it.
+ // This math is overflow free since we verify that offset and numBytes can
+ // be safely added when creating the sparseFileReader.
+ endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
+ bytesLeft := endPos - sfr.pos // Bytes left in fragment
+ if int64(len(b)) > bytesLeft {
+ b = b[:bytesLeft]
+ }
+
+ n, err = sfr.rfr.Read(b)
+ sfr.pos += int64(n)
+ if err == io.EOF {
+ if sfr.pos < endPos {
+ err = io.ErrUnexpectedEOF // There was supposed to be more data
+ } else if sfr.pos < sfr.total {
+ err = nil // There is still an implicit sparse hole at the end
+ }
+ }
+
+ if sfr.pos == endPos {
+ sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
+ }
+ return n, err
+}
+
+// numBytes returns the number of bytes left to read in the sparse file's
+// sparse-encoded data in the tar archive.
+func (sfr *sparseFileReader) numBytes() int64 {
+ return sfr.rfr.numBytes()
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go
new file mode 100644
index 000000000..cf9cc79c5
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atim.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctim.Unix())
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go
new file mode 100644
index 000000000..6f17dbe30
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atimespec.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctimespec.Unix())
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go
new file mode 100644
index 000000000..cb843db4c
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+ "os"
+ "syscall"
+)
+
+func init() {
+ sysStat = statUnix
+}
+
+func statUnix(fi os.FileInfo, h *Header) error {
+ sys, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+ h.Uid = int(sys.Uid)
+ h.Gid = int(sys.Gid)
+ // TODO(bradfitz): populate username & group. os/user
+ // doesn't cache LookupId lookups, and lacks group
+ // lookup functions.
+ h.AccessTime = statAtime(sys)
+ h.ChangeTime = statCtime(sys)
+ // TODO(bradfitz): major/minor device numbers?
+ return nil
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
new file mode 100644
index 000000000..042638175
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
@@ -0,0 +1,416 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrWriteTooLong = errors.New("archive/tar: write too long")
+ ErrFieldTooLong = errors.New("archive/tar: header field too long")
+ ErrWriteAfterClose = errors.New("archive/tar: write after close")
+ errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
+)
+
+// A Writer provides sequential writing of a tar archive in POSIX.1 format.
+// A tar archive consists of a sequence of files.
+// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
+// writing at most hdr.Size bytes in total.
+type Writer struct {
+ w io.Writer
+ err error
+ nb int64 // number of unwritten bytes for current file entry
+ pad int64 // amount of padding to write after current file entry
+ closed bool
+ usedBinary bool // whether the binary numeric field extension was used
+ preferPax bool // use pax header instead of binary numeric header
+ hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
+ paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
+}
+
+type formatter struct {
+ err error // Last error seen
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+ if tw.nb > 0 {
+ tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+ return tw.err
+ }
+
+ n := tw.nb + tw.pad
+ for n > 0 && tw.err == nil {
+ nr := n
+ if nr > blockSize {
+ nr = blockSize
+ }
+ var nw int
+ nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+ n -= int64(nw)
+ }
+ tw.nb = 0
+ tw.pad = 0
+ return tw.err
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+func (f *formatter) formatString(b []byte, s string) {
+ if len(s) > len(b) {
+ f.err = ErrFieldTooLong
+ return
+ }
+ ascii := toASCII(s)
+ copy(b, ascii)
+ if len(ascii) < len(b) {
+ b[len(ascii)] = 0
+ }
+}
+
+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (f *formatter) formatOctal(b []byte, x int64) {
+ s := strconv.FormatInt(x, 8)
+ // leading zeros, but leave room for a NUL.
+ for len(s)+1 < len(b) {
+ s = "0" + s
+ }
+ f.formatString(b, s)
+}
+
+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode, which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+ var binBits = uint(n-1) * 8
+ return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
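Worked bounds for the predicate: with n bytes, one bit marks the binary format, leaving binBits = (n-1)*8 payload bits.

```go
// fitsInBase256(8, 1<<56-1) == true  // 7 payload bytes = 56 bits
// fitsInBase256(8, 1<<56)   == false // would need a 9th byte
// fitsInBase256(9, x)       == true  // any int64 x fits once n >= 9
```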
+
+// Write x into b, as binary (GNUtar/star extension).
+func (f *formatter) formatNumeric(b []byte, x int64) {
+ if fitsInBase256(len(b), x) {
+ for i := len(b) - 1; i >= 0; i-- {
+ b[i] = byte(x)
+ x >>= 8
+ }
+ b[0] |= 0x80 // Highest bit indicates binary format
+ return
+ }
+
+ f.formatOctal(b, 0) // Last resort, just write zero
+ f.err = ErrFieldTooLong
+}
+
+var (
+ minTime = time.Unix(0, 0)
+ // There is room for 11 octal digits (33 bits) of mtime.
+ maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+ return tw.writeHeader(hdr, true)
+}
+
+// writeHeader writes hdr and prepares to accept the file's contents.
+// It calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// This method is called internally by writePAXHeader with allowPax set to
+// false, so that it can suppress writing the pax header recursively.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+ if tw.closed {
+ return ErrWriteAfterClose
+ }
+ if tw.err == nil {
+ tw.Flush()
+ }
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // a map to hold pax header records, if any are needed
+ paxHeaders := make(map[string]string)
+
+ // TODO(shanemhansen): we might want to use PAX headers for
+ // subsecond time resolution, but for now let's just capture
+ // too long fields or non ascii characters
+
+ var f formatter
+ var header []byte
+
+ // We need to select which scratch buffer to use carefully,
+ // since this method is called recursively to write PAX headers.
+ // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+ // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+ // already being used by the non-recursive call, so we must use paxHdrBuff.
+ header = tw.hdrBuff[:]
+ if !allowPax {
+ header = tw.paxHdrBuff[:]
+ }
+ copy(header, zeroBlock)
+ s := slicer(header)
+
+ // Wrappers around formatter that automatically sets paxHeaders if the
+ // argument extends beyond the capacity of the input byte slice.
+ var formatString = func(b []byte, s string, paxKeyword string) {
+ needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
+ if needsPaxHeader {
+ paxHeaders[paxKeyword] = s
+ return
+ }
+ f.formatString(b, s)
+ }
+ var formatNumeric = func(b []byte, x int64, paxKeyword string) {
+ // Try octal first.
+ s := strconv.FormatInt(x, 8)
+ if len(s) < len(b) {
+ f.formatOctal(b, x)
+ return
+ }
+
+ // If it is too long for octal, and PAX is preferred, use a PAX header.
+ if paxKeyword != paxNone && tw.preferPax {
+ f.formatOctal(b, 0)
+ s := strconv.FormatInt(x, 10)
+ paxHeaders[paxKeyword] = s
+ return
+ }
+
+ tw.usedBinary = true
+ f.formatNumeric(b, x)
+ }
+
+	// keep a reference to the filename so we can overwrite it later if we detect that we can use ustar longnames instead of pax
+ pathHeaderBytes := s.next(fileNameSize)
+
+ formatString(pathHeaderBytes, hdr.Name, paxPath)
+
+ // Handle out of range ModTime carefully.
+ var modTime int64
+ if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
+ modTime = hdr.ModTime.Unix()
+ }
+
+ f.formatOctal(s.next(8), hdr.Mode) // 100:108
+ formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
+ formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
+ formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
+ formatNumeric(s.next(12), modTime, paxNone) // 136:148 --- consider using pax for finer granularity
+ s.next(8) // chksum (148:156)
+ s.next(1)[0] = hdr.Typeflag // 156:157
+
+ formatString(s.next(100), hdr.Linkname, paxLinkpath)
+
+ copy(s.next(8), []byte("ustar\x0000")) // 257:265
+ formatString(s.next(32), hdr.Uname, paxUname) // 265:297
+ formatString(s.next(32), hdr.Gname, paxGname) // 297:329
+ formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
+ formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
+
+	// keep a reference to the prefix so we can overwrite it later if we detect that we can use ustar longnames instead of pax
+ prefixHeaderBytes := s.next(155)
+ formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
+
+ // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
+ if tw.usedBinary {
+ copy(header[257:265], []byte("ustar \x00"))
+ }
+
+ _, paxPathUsed := paxHeaders[paxPath]
+ // try to use a ustar header when only the name is too long
+ if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
+ prefix, suffix, ok := splitUSTARPath(hdr.Name)
+ if ok {
+ // Since we can encode in USTAR format, disable PAX header.
+ delete(paxHeaders, paxPath)
+
+ // Update the path fields
+ formatString(pathHeaderBytes, suffix, paxNone)
+ formatString(prefixHeaderBytes, prefix, paxNone)
+ }
+ }
+
+ // The chksum field is terminated by a NUL and a space.
+ // This is different from the other octal fields.
+ chksum, _ := checksum(header)
+ f.formatOctal(header[148:155], chksum) // Never fails
+ header[155] = ' '
+
+ // Check if there were any formatting errors.
+ if f.err != nil {
+ tw.err = f.err
+ return tw.err
+ }
+
+ if allowPax {
+ for k, v := range hdr.Xattrs {
+ paxHeaders[paxXattr+k] = v
+ }
+ }
+
+ if len(paxHeaders) > 0 {
+ if !allowPax {
+ return errInvalidHeader
+ }
+ if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+ return err
+ }
+ }
+ tw.nb = int64(hdr.Size)
+ tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
+
+ _, tw.err = tw.w.Write(header)
+ return tw.err
+}
+
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
+ length := len(name)
+ if length <= fileNameSize || !isASCII(name) {
+ return "", "", false
+ } else if length > fileNamePrefixSize+1 {
+ length = fileNamePrefixSize + 1
+ } else if name[length-1] == '/' {
+ length--
+ }
+
+ i := strings.LastIndex(name[:length], "/")
+ nlen := len(name) - i - 1 // nlen is length of suffix
+ plen := i // plen is length of prefix
+ if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
+ return "", "", false
+ }
+ return name[:i], name[i+1:], true
+}
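A quick example of the split rules (fileNameSize is 100 and fileNamePrefixSize is 155 in this package; assumes the strings import):

```go
// A 110-byte ASCII path overflows the 100-byte name field, but splits
// cleanly at the '/' into a 60-byte prefix and a 49-byte suffix:
long := strings.Repeat("a", 60) + "/" + strings.Repeat("b", 49)
prefix, suffix, ok := splitUSTARPath(long)
// ok == true, len(prefix) == 60, len(suffix) == 49
```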
+
+// writePAXHeader writes an extended pax header to the
+// archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+ // Prepare extended header
+ ext := new(Header)
+ ext.Typeflag = TypeXHeader
+ // Setting ModTime is required for reader parsing to
+ // succeed, and seems harmless enough.
+ ext.ModTime = hdr.ModTime
+ // The spec asks that we namespace our pseudo files
+ // with the current pid. However, this results in differing outputs
+ // for identical inputs. As such, the constant 0 is now used instead.
+ // golang.org/issue/12358
+ dir, file := path.Split(hdr.Name)
+ fullName := path.Join(dir, "PaxHeaders.0", file)
+
+ ascii := toASCII(fullName)
+ if len(ascii) > 100 {
+ ascii = ascii[:100]
+ }
+ ext.Name = ascii
+ // Construct the body
+ var buf bytes.Buffer
+
+ // Keys are sorted before writing to body to allow deterministic output.
+ var keys []string
+ for k := range paxHeaders {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
+ }
+
+ ext.Size = int64(len(buf.Bytes()))
+ if err := tw.writeHeader(ext, false); err != nil {
+ return err
+ }
+ if _, err := tw.Write(buf.Bytes()); err != nil {
+ return err
+ }
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// formatPAXRecord formats a single PAX record, prefixing it with the
+// appropriate length.
+func formatPAXRecord(k, v string) string {
+ const padding = 3 // Extra padding for ' ', '=', and '\n'
+ size := len(k) + len(v) + padding
+ size += len(strconv.Itoa(size))
+ record := fmt.Sprintf("%d %s=%s\n", size, k, v)
+
+ // Final adjustment if adding size field increased the record size.
+ if len(record) != size {
+ size = len(record)
+ record = fmt.Sprintf("%d %s=%s\n", size, k, v)
+ }
+ return record
+}
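The self-referential length above converges after at most one retry; two worked cases:

```go
// formatPAXRecord("path", "foo"):
//   size = 4+3+3 = 10, plus len("10") = 12; "12 path=foo\n" is 12 bytes. Done.
// formatPAXRecord("path", "ab"):
//   size = 4+2+3 = 9, plus len("9") = 10; but "10 path=ab\n" is 11 bytes,
//   so the adjustment runs once more and emits "11 path=ab\n" (11 bytes).
```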
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+ if tw.closed {
+ err = ErrWriteAfterClose
+ return
+ }
+ overwrite := false
+ if int64(len(b)) > tw.nb {
+ b = b[0:tw.nb]
+ overwrite = true
+ }
+ n, err = tw.w.Write(b)
+ tw.nb -= int64(n)
+ if err == nil && overwrite {
+ err = ErrWriteTooLong
+ return
+ }
+ tw.err = err
+ return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+ if tw.err != nil || tw.closed {
+ return tw.err
+ }
+ tw.Flush()
+ tw.closed = true
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // trailer: two zero blocks
+ for i := 0; i < 2; i++ {
+ _, tw.err = tw.w.Write(zeroBlock)
+ if tw.err != nil {
+ break
+ }
+ }
+ return tw.err
+}
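Putting the Writer together, a minimal usage sketch (the file name and body are illustrative; assumes the bytes, log, and time imports):

```go
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
body := []byte("hello world")
hdr := &tar.Header{
	Name:    "hello.txt",
	Mode:    0644,
	Size:    int64(len(body)),
	ModTime: time.Now(),
}
if err := tw.WriteHeader(hdr); err != nil {
	log.Fatal(err)
}
if _, err := tw.Write(body); err != nil {
	log.Fatal(err)
}
// Close flushes padding and writes the two-block zero trailer.
if err := tw.Close(); err != nil {
	log.Fatal(err)
}
```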
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/README.md b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
new file mode 100644
index 000000000..2a3a5b56a
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
@@ -0,0 +1,44 @@
+asm
+===
+
+This library is for the assembly and disassembly of tar archives, facilitated
+by `github.com/vbatts/tar-split/tar/storage`.
+
+
+Concerns
+--------
+
+For completely safe assembly/disassembly, there will need to be a Content
+Addressable Storage (CAS) directory that maps to a checksum in the
+`storage.Entry` of `storage.FileType`.
+
+This is due to the fact that tar archives _can_ allow multiple records for the
+same path, where the last one effectively wins, even if the prior records had
+a different payload.
+
+In this way, when assembling an archive from relative paths, if the archive
+has multiple entries for the same path, then all payloads read back from that
+relative path would be identical.
+
+
+Thoughts
+--------
+
+Have a look-aside directory or storage. This way when a clobbering record is
+encountered from the tar stream, then the payload of the prior/existing file is
+stored to the CAS. This way the clobbering record's file payload can be
+extracted, but we'll have preserved the payload needed to reassemble a precise
+tar archive.
+
+clobbered/path/to/file.[0-N]
+
+*alternatively*
+
+We could just _not_ support tar streams that have clobbering file paths.
+Appending records to the archive is not incredibly common, and doesn't happen
+by default for most implementations. Not supporting them wouldn't be a
+security concern either, as if it did occur, we would reassemble an archive
+that doesn't validate signature/checksum, so it shouldn't be trusted anyway.
+
+Otherwise, this will allow us to defer support for appended files as a FUTURE FEATURE.
+
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
new file mode 100644
index 000000000..d624450ab
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
@@ -0,0 +1,130 @@
+package asm
+
+import (
+ "bytes"
+ "fmt"
+ "hash"
+ "hash/crc64"
+ "io"
+ "sync"
+
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
+// stream.
+//
+// It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
+// and a storage.Unpacker, which has access to the rawbytes and file order
+// metadata. With the combination of these two items, a precise assembled Tar
+// archive is possible.
+func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
+	// Since these are interfaces, the caller could pass nil; guard against nil pointers.
+ if fg == nil || up == nil {
+ return nil
+ }
+ pr, pw := io.Pipe()
+ go func() {
+ err := WriteOutputTarStream(fg, up, pw)
+ if err != nil {
+ pw.CloseWithError(err)
+ } else {
+ pw.Close()
+ }
+ }()
+ return pr
+}
+
+// WriteOutputTarStream writes assembled tar archive to a writer.
+func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error {
+	// Since these are interfaces, the caller could pass nil; guard against nil pointers.
+ if fg == nil || up == nil {
+ return nil
+ }
+ var copyBuffer []byte
+ var crcHash hash.Hash
+ var crcSum []byte
+ var multiWriter io.Writer
+ for {
+ entry, err := up.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ switch entry.Type {
+ case storage.SegmentType:
+ if _, err := w.Write(entry.Payload); err != nil {
+ return err
+ }
+ case storage.FileType:
+ if entry.Size == 0 {
+ continue
+ }
+ fh, err := fg.Get(entry.GetName())
+ if err != nil {
+ return err
+ }
+ if crcHash == nil {
+ crcHash = crc64.New(storage.CRCTable)
+ crcSum = make([]byte, 8)
+ multiWriter = io.MultiWriter(w, crcHash)
+ copyBuffer = byteBufferPool.Get().([]byte)
+ defer byteBufferPool.Put(copyBuffer)
+ } else {
+ crcHash.Reset()
+ }
+
+ if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil {
+ fh.Close()
+ return err
+ }
+
+ if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) {
+ // I would rather this be a comparable ErrInvalidChecksum or such,
+ // but since it's coming through the PipeReader, the context of
+ // _which_ file would be lost...
+ fh.Close()
+ return fmt.Errorf("file integrity checksum failed for %q", entry.GetName())
+ }
+ fh.Close()
+ }
+ }
+}
+
+var byteBufferPool = &sync.Pool{
+ New: func() interface{} {
+ return make([]byte, 32*1024)
+ },
+}
+
+// copyWithBuffer is taken from stdlib io.Copy implementation
+// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367
+func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+ for {
+ nr, er := src.Read(buf)
+ if nr > 0 {
+ nw, ew := dst.Write(buf[0:nr])
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = io.ErrShortWrite
+ break
+ }
+ }
+ if er == io.EOF {
+ break
+ }
+ if er != nil {
+ err = er
+ break
+ }
+ }
+ return written, err
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
new file mode 100644
index 000000000..54ef23aed
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -0,0 +1,141 @@
+package asm
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/vbatts/tar-split/archive/tar"
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewInputTarStream wraps the Reader stream of a tar archive and provides a
+// Reader stream of the same.
+//
+// In the middle it will pack the segments and file metadata to storage.Packer
+// `p`.
+//
+The storage.FilePutter is where the payloads of files in the stream are
+stashed. If this stashing is not needed, you can provide a nil
+storage.FilePutter. Since the checksumming is still needed, a default of
+NewDiscardFilePutter will be used internally.
+func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
+ // What to do here... folks will want their own access to the Reader that is
+ // their tar archive stream, but we'll need that same stream to use our
+ // forked 'archive/tar'.
+ // Perhaps do an io.TeeReader that hands back an io.Reader for them to read
+ // from, and we'll MITM the stream to store metadata.
+ // We'll need a storage.FilePutter too ...
+
+ // Another concern, whether to do any storage.FilePutter operations, such that we
+ // don't extract any amount of the archive. But then again, we're not making
+ // files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter.
+ // Perhaps we have a DiscardFilePutter that is a bit bucket.
+
+ // we'll return the pipe reader, since TeeReader does not buffer and will
+ // only read what the outputRdr Read's. Since Tar archives have padding on
+ // the end, we want to be the one reading the padding, even if the user's
+ // `archive/tar` doesn't care.
+ pR, pW := io.Pipe()
+ outputRdr := io.TeeReader(r, pW)
+
+ // we need a putter that will generate the crc64 sums of file payloads
+ if fp == nil {
+ fp = storage.NewDiscardFilePutter()
+ }
+
+ go func() {
+ tr := tar.NewReader(outputRdr)
+ tr.RawAccounting = true
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ pW.CloseWithError(err)
+ return
+ }
+				// even when an EOF is reached, there are often 1024 null bytes on
+				// the end of an archive. Collect them too.
+ if b := tr.RawBytes(); len(b) > 0 {
+ _, err := p.AddEntry(storage.Entry{
+ Type: storage.SegmentType,
+ Payload: b,
+ })
+ if err != nil {
+ pW.CloseWithError(err)
+ return
+ }
+ }
+ break // not return. We need the end of the reader.
+ }
+ if hdr == nil {
+ break // not return. We need the end of the reader.
+ }
+
+ if b := tr.RawBytes(); len(b) > 0 {
+ _, err := p.AddEntry(storage.Entry{
+ Type: storage.SegmentType,
+ Payload: b,
+ })
+ if err != nil {
+ pW.CloseWithError(err)
+ return
+ }
+ }
+
+ var csum []byte
+ if hdr.Size > 0 {
+ var err error
+ _, csum, err = fp.Put(hdr.Name, tr)
+ if err != nil {
+ pW.CloseWithError(err)
+ return
+ }
+ }
+
+ entry := storage.Entry{
+ Type: storage.FileType,
+ Size: hdr.Size,
+ Payload: csum,
+ }
+ // For proper marshalling of non-utf8 characters
+ entry.SetName(hdr.Name)
+
+ // File entries added, regardless of size
+ _, err = p.AddEntry(entry)
+ if err != nil {
+ pW.CloseWithError(err)
+ return
+ }
+
+ if b := tr.RawBytes(); len(b) > 0 {
+ _, err = p.AddEntry(storage.Entry{
+ Type: storage.SegmentType,
+ Payload: b,
+ })
+ if err != nil {
+ pW.CloseWithError(err)
+ return
+ }
+ }
+ }
+
+		// it is allowable, and not uncommon, that there is further padding on
+		// the end of an archive, beyond the expected 1024 null bytes.
+ remainder, err := ioutil.ReadAll(outputRdr)
+ if err != nil && err != io.EOF {
+ pW.CloseWithError(err)
+ return
+ }
+ _, err = p.AddEntry(storage.Entry{
+ Type: storage.SegmentType,
+ Payload: remainder,
+ })
+ if err != nil {
+ pW.CloseWithError(err)
+ return
+ }
+ pW.Close()
+ }()
+
+ return pR, nil
+}
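A hedged usage sketch ("archive.tar" is a hypothetical input): tee an archive through NewInputTarStream, packing metadata as JSON while the default putter discards payloads:

```go
f, err := os.Open("archive.tar")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

var meta bytes.Buffer
rdr, err := asm.NewInputTarStream(f, storage.NewJSONPacker(&meta), nil)
if err != nil {
	log.Fatal(err)
}
// Read the stream as usual; segment and file metadata accumulate in meta.
n, err := io.Copy(ioutil.Discard, rdr)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("read %d tar bytes; %d bytes of metadata\n", n, meta.Len())
```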
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/doc.go b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
new file mode 100644
index 000000000..4367b9022
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
@@ -0,0 +1,9 @@
+/*
+Package asm provides the API for streaming assembly and disassembly of tar
+archives.
+
+Using the `github.com/vbatts/tar-split/tar/storage` for Packing/Unpacking the
+metadata for a stream, as well as an implementation of Getting/Putting the file
+entries' payload.
+*/
+package asm
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/doc.go b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
new file mode 100644
index 000000000..83f7089ff
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
@@ -0,0 +1,12 @@
+/*
+Package storage is for metadata of a tar archive.
+
+Packing and unpacking the Entries of the stream. The types of streams are
+either segments of raw bytes (for the raw headers and various padding) and for
+an entry marking a file payload.
+
+The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
+the file payload marker include the name of the file, size, and crc64 checksum
+(for basic file integrity).
+*/
+package storage
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/entry.go b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
new file mode 100644
index 000000000..c91e7ea1e
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
@@ -0,0 +1,78 @@
+package storage
+
+import "unicode/utf8"
+
+// Entries is for sorting by Position
+type Entries []Entry
+
+func (e Entries) Len() int { return len(e) }
+func (e Entries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
+
+// Type of Entry
+type Type int
+
+const (
+ // FileType represents a file payload from the tar stream.
+ //
+	// This will be used to map to relative paths on disk. Only entries with
+	// Size > 0 will get read into a resulting output stream (due to hardlinks).
+ FileType Type = 1 + iota
+ // SegmentType represents a raw bytes segment from the archive stream. These raw
+ // byte segments consist of the raw headers and various padding.
+ //
+	// Its payload is base64 encoded when marshalled to JSON.
+ SegmentType
+)
+
+// Entry is the structure for packing and unpacking the information read from
+// the Tar archive.
+//
+// FileType Payload checksum is using `hash/crc64` for basic file integrity,
+// _not_ for cryptography.
+// From http://www.backplane.com/matt/crc64.html, CRC32 has almost 40,000
+// collisions in a sample of 18.2 million, CRC64 had none.
+type Entry struct {
+ Type Type `json:"type"`
+ Name string `json:"name,omitempty"`
+ NameRaw []byte `json:"name_raw,omitempty"`
+ Size int64 `json:"size,omitempty"`
+ Payload []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
+ Position int `json:"position"`
+}
+
+// SetName checks whether name is a valid UTF-8 string, and sets the
+// appropriate field. See https://github.com/vbatts/tar-split/issues/17
+func (e *Entry) SetName(name string) {
+ if utf8.ValidString(name) {
+ e.Name = name
+ } else {
+ e.NameRaw = []byte(name)
+ }
+}
+
+// SetNameBytes checks whether name is a valid UTF-8 string, and sets the
+// appropriate field
+func (e *Entry) SetNameBytes(name []byte) {
+ if utf8.Valid(name) {
+ e.Name = string(name)
+ } else {
+ e.NameRaw = name
+ }
+}
+
+// GetName returns the string for the entry's name, regardless of which field it is stored in
+func (e *Entry) GetName() string {
+ if len(e.NameRaw) > 0 {
+ return string(e.NameRaw)
+ }
+ return e.Name
+}
+
+// GetNameBytes returns the bytes for the entry's name, regardless of which field it is stored in
+func (e *Entry) GetNameBytes() []byte {
+ if len(e.NameRaw) > 0 {
+ return e.NameRaw
+ }
+ return []byte(e.Name)
+}
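A small illustration of the UTF-8 routing (assumes the fmt import):

```go
var good, raw storage.Entry
good.SetName("h\xc3\xa9llo.txt") // valid UTF-8: stored in Name
raw.SetName("bad\xffname")       // invalid UTF-8: stored in NameRaw
fmt.Println(good.Name, raw.Name == "", len(raw.NameRaw)) // "héllo.txt" true 8
```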
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
new file mode 100644
index 000000000..ae11f8ffd
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
@@ -0,0 +1,104 @@
+package storage
+
+import (
+ "bytes"
+ "errors"
+ "hash/crc64"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filename. Presumably, the names will be scoped to relative
+// file paths.
+type FileGetter interface {
+ // Get returns a stream for the provided file path
+ Get(filename string) (output io.ReadCloser, err error)
+}
+
+// FilePutter is the interface for storing a stream of a file payload,
+// addressed by name/filename.
+type FilePutter interface {
+ // Put returns the size of the stream received, and the crc64 checksum for
+ // the provided stream
+ Put(filename string, input io.Reader) (size int64, checksum []byte, err error)
+}
+
+// FileGetPutter is the interface that groups both Getting and Putting file
+// payloads.
+type FileGetPutter interface {
+ FileGetter
+ FilePutter
+}
+
+// NewPathFileGetter returns a FileGetter that is for files relative to path
+// relpath.
+func NewPathFileGetter(relpath string) FileGetter {
+ return &pathFileGetter{root: relpath}
+}
+
+type pathFileGetter struct {
+ root string
+}
+
+func (pfg pathFileGetter) Get(filename string) (io.ReadCloser, error) {
+ return os.Open(filepath.Join(pfg.root, filename))
+}
+
+type bufferFileGetPutter struct {
+ files map[string][]byte
+}
+
+func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
+ if _, ok := bfgp.files[name]; !ok {
+ return nil, errors.New("no such file")
+ }
+ b := bytes.NewBuffer(bfgp.files[name])
+ return &readCloserWrapper{b}, nil
+}
+
+func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
+ crc := crc64.New(CRCTable)
+ buf := bytes.NewBuffer(nil)
+ cw := io.MultiWriter(crc, buf)
+ i, err := io.Copy(cw, r)
+ if err != nil {
+ return 0, nil, err
+ }
+ bfgp.files[name] = buf.Bytes()
+ return i, crc.Sum(nil), nil
+}
+
+type readCloserWrapper struct {
+ io.Reader
+}
+
+func (w *readCloserWrapper) Close() error { return nil }
+
+// NewBufferFileGetPutter is a simple in-memory FileGetPutter
+//
+// The implication is that this is memory intensive; it is probably best
+// suited for testing or lightweight use cases.
+func NewBufferFileGetPutter() FileGetPutter {
+ return &bufferFileGetPutter{
+ files: map[string][]byte{},
+ }
+}
+
+// NewDiscardFilePutter is a bit bucket FilePutter
+func NewDiscardFilePutter() FilePutter {
+ return &bitBucketFilePutter{}
+}
+
+type bitBucketFilePutter struct {
+}
+
+func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
+ c := crc64.New(CRCTable)
+ i, err := io.Copy(c, r)
+ return i, c.Sum(nil), err
+}
+
+// CRCTable is the default table used for crc64 sum calculations
+var CRCTable = crc64.MakeTable(crc64.ISO)
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
new file mode 100644
index 000000000..aba694818
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
@@ -0,0 +1,127 @@
+package storage
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "path/filepath"
+ "unicode/utf8"
+)
+
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
+
+// Packer describes the methods to pack Entries to a storage destination
+type Packer interface {
+ // AddEntry packs the Entry and returns its position
+ AddEntry(e Entry) (int, error)
+}
+
+// Unpacker describes the methods to read Entries from a source
+type Unpacker interface {
+ // Next returns the next Entry being unpacked, or error, until io.EOF
+ Next() (*Entry, error)
+}
+
+/* TODO(vbatts) figure out a good model for this
+type PackUnpacker interface {
+ Packer
+ Unpacker
+}
+*/
+
+type jsonUnpacker struct {
+ seen seenNames
+ dec *json.Decoder
+}
+
+func (jup *jsonUnpacker) Next() (*Entry, error) {
+ var e Entry
+ err := jup.dec.Decode(&e)
+ if err != nil {
+ return nil, err
+ }
+
+ // check for dup name
+ if e.Type == FileType {
+ cName := filepath.Clean(e.GetName())
+ if _, ok := jup.seen[cName]; ok {
+ return nil, ErrDuplicatePath
+ }
+ jup.seen[cName] = struct{}{}
+ }
+
+ return &e, err
+}
+
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
+// FileType) as a json document.
+//
+// Each Entry read is expected to be delimited by a new line.
+func NewJSONUnpacker(r io.Reader) Unpacker {
+ return &jsonUnpacker{
+ dec: json.NewDecoder(r),
+ seen: seenNames{},
+ }
+}
+
+type jsonPacker struct {
+ w io.Writer
+ e *json.Encoder
+ pos int
+ seen seenNames
+}
+
+type seenNames map[string]struct{}
+
+func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
+ // if Name is not valid utf8, switch it to raw first.
+ if e.Name != "" {
+ if !utf8.ValidString(e.Name) {
+ e.NameRaw = []byte(e.Name)
+ e.Name = ""
+ }
+ }
+
+ // check early for dup name
+ if e.Type == FileType {
+ cName := filepath.Clean(e.GetName())
+ if _, ok := jp.seen[cName]; ok {
+ return -1, ErrDuplicatePath
+ }
+ jp.seen[cName] = struct{}{}
+ }
+
+ e.Position = jp.pos
+ err := jp.e.Encode(e)
+ if err != nil {
+ return -1, err
+ }
+
+ // made it this far, increment now
+ jp.pos++
+ return e.Position, nil
+}
+
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
+// FileType) as a json document.
+//
+// The Entries are delimited by new line.
+func NewJSONPacker(w io.Writer) Packer {
+ return &jsonPacker{
+ w: w,
+ e: json.NewEncoder(w),
+ seen: seenNames{},
+ }
+}
+
+/*
+TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgpack
+(https://github.com/ugorji/go)
+
+
+Even so, since our jsonUnpacker and jsonPacker just take an
+io.Reader/io.Writer, we can get away with passing them a
+gzip.Reader/gzip.Writer
+*/
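A round-trip sketch of the JSON packer and unpacker above (AddEntry error returns elided for brevity):

```go
var buf bytes.Buffer
p := storage.NewJSONPacker(&buf)
p.AddEntry(storage.Entry{Type: storage.SegmentType, Payload: []byte("raw header bytes")})
file := storage.Entry{Type: storage.FileType, Size: 3}
file.SetName("a/b.txt")
p.AddEntry(file)

up := storage.NewJSONUnpacker(&buf)
for {
	e, err := up.Next()
	if err != nil { // io.EOF once the stream is exhausted
		break
	}
	fmt.Println(e.Position, e.Type, e.GetName())
}
```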
diff --git a/vendor/github.com/vishvananda/netlink/LICENSE b/vendor/github.com/vishvananda/netlink/LICENSE
new file mode 100644
index 000000000..9f64db858
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Vishvananda Ishaya.
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md
new file mode 100644
index 000000000..0b61be217
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/README.md
@@ -0,0 +1,91 @@
+# netlink - netlink library for Go #
+
+[![Build Status](https://travis-ci.org/vishvananda/netlink.png?branch=master)](https://travis-ci.org/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink)
+
+The netlink package provides a simple netlink library for Go. Netlink
+is the interface a user-space program in Linux uses to communicate with
+the kernel. It can be used to add and remove interfaces, set IP addresses
+and routes, and configure IPsec. Netlink communication requires elevated
+privileges, so in most cases this code needs to be run as root. Since
+low-level netlink messages are inscrutable at best, the library attempts
+to provide an API that is loosely modeled on the CLI provided by iproute2.
+Actions like `ip link add` are accomplished via similarly named
+functions like `AddLink()`. This library began its life as a fork of the
+netlink functionality in
+[docker/libcontainer](https://github.com/docker/libcontainer) but was
+heavily rewritten to improve testability and performance, and to add new
+functionality like IPsec xfrm handling.
+
+## Local Build and Test ##
+
+You can fetch the library with the `go get` command:
+
+ go get github.com/vishvananda/netlink
+
+Testing dependencies:
+
+ go get github.com/vishvananda/netns
+
+Testing (requires root):
+
+ sudo -E go test github.com/vishvananda/netlink
+
+## Examples ##
+
+Add a new bridge and add eth1 into it:
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ la := netlink.NewLinkAttrs()
+ la.Name = "foo"
+ mybridge := &netlink.Bridge{LinkAttrs: la}
+ err := netlink.LinkAdd(mybridge)
+ if err != nil {
+ fmt.Printf("could not add %s: %v\n", la.Name, err)
+ }
+ eth1, _ := netlink.LinkByName("eth1")
+ netlink.LinkSetMaster(eth1, mybridge)
+}
+
+```
+Note the `NewLinkAttrs` constructor: it sets default values in the structure.
+For now it only sets `TxQLen` to `-1`, so the kernel will pick its own default.
+With plain struct initialization (`LinkAttrs{Name: "foo"}`), `TxQLen` will be
+set to `0` unless you specify it explicitly, e.g.
+`LinkAttrs{Name: "foo", TxQLen: 1000}`, as the sketch below illustrates.
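+
+For illustration, a minimal sketch of the difference (the names are arbitrary
+and nothing is sent to the kernel):
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ // NewLinkAttrs sets TxQLen to -1, deferring to the kernel default.
+ la := netlink.NewLinkAttrs()
+ la.Name = "foo"
+
+ // A plain struct literal leaves TxQLen at 0 unless set explicitly.
+ manual := netlink.LinkAttrs{Name: "foo", TxQLen: 1000}
+
+ fmt.Println(la.TxQLen, manual.TxQLen)
+}
+```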
+
+Add a new IP address to the loopback interface:
+
+```go
+package main
+
+import (
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ lo, _ := netlink.LinkByName("lo")
+ addr, _ := netlink.ParseAddr("169.254.169.254/32")
+ netlink.AddrAdd(lo, addr)
+}
+
+```
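+
+To verify the result, a sketch that lists the addresses back with `AddrList`
+(`FAMILY_V4` restricts the dump to IPv4):
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ lo, _ := netlink.LinkByName("lo")
+ addrs, err := netlink.AddrList(lo, netlink.FAMILY_V4)
+ if err != nil {
+  fmt.Printf("could not list addresses: %v\n", err)
+  return
+ }
+ for _, a := range addrs {
+  fmt.Println(a.String())
+ }
+}
+```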
+
+## Future Work ##
+
+Many pieces of netlink are not yet fully supported in the high-level
+interface. Aspects of virtually all of the high-level objects are still
+missing. Many of the underlying primitives are there, so it's a matter of
+putting the right fields into the high-level objects and making sure that
+they are serialized and deserialized correctly in the Add and List methods.
+
+There are also a few pieces of low-level netlink functionality that still
+need to be implemented. Routing rules are not in place, nor are some of the
+more advanced link types. Hopefully there is decent structure and testing
+in place to make these fairly straightforward to add.
diff --git a/vendor/github.com/vishvananda/netlink/addr.go b/vendor/github.com/vishvananda/netlink/addr.go
new file mode 100644
index 000000000..f08c95696
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/addr.go
@@ -0,0 +1,56 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// Addr represents an IP address from netlink. Netlink IP addresses
+// include a mask, so it stores the address as a net.IPNet.
+type Addr struct {
+ *net.IPNet
+ Label string
+ Flags int
+ Scope int
+ Peer *net.IPNet
+ Broadcast net.IP
+ PreferedLft int
+ ValidLft int
+}
+
+// String returns $ip/$netmask $label
+func (a Addr) String() string {
+ return strings.TrimSpace(fmt.Sprintf("%s %s", a.IPNet, a.Label))
+}
+
+// ParseAddr parses the string representation of an address in the
+// form $ip/$netmask $label. The label portion is optional.
+func ParseAddr(s string) (*Addr, error) {
+ label := ""
+ parts := strings.Split(s, " ")
+ if len(parts) > 1 {
+ s = parts[0]
+ label = parts[1]
+ }
+ m, err := ParseIPNet(s)
+ if err != nil {
+ return nil, err
+ }
+ return &Addr{IPNet: m, Label: label}, nil
+}
+
+// Equal returns true if both Addrs have the same net.IPNet value.
+func (a Addr) Equal(x Addr) bool {
+ sizea, _ := a.Mask.Size()
+ sizeb, _ := x.Mask.Size()
+ // ignore label for comparison
+ return a.IP.Equal(x.IP) && sizea == sizeb
+}
+
+// PeerEqual returns true if both Addrs have the same Peer net.IPNet value.
+func (a Addr) PeerEqual(x Addr) bool {
+ sizea, _ := a.Peer.Mask.Size()
+ sizeb, _ := x.Peer.Mask.Size()
+ // ignore label for comparison
+ return a.Peer.IP.Equal(x.Peer.IP) && sizea == sizeb
+}
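+
+// Example (illustrative sketch, from a client package): parsing an address
+// with an optional label; the values below are arbitrary.
+//
+//  addr, err := netlink.ParseAddr("192.168.1.1/24 eth0:1")
+//  if err == nil {
+//   fmt.Println(addr.IPNet, addr.Label)
+//  }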
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
new file mode 100644
index 000000000..f33242a7c
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -0,0 +1,288 @@
+package netlink
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "strings"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+// IFA_FLAGS is a u32 attribute.
+const IFA_FLAGS = 0x8
+
+// AddrAdd will add an IP address to a link device.
+// Equivalent to: `ip addr add $addr dev $link`
+func AddrAdd(link Link, addr *Addr) error {
+ return pkgHandle.AddrAdd(link, addr)
+}
+
+// AddrAdd will add an IP address to a link device.
+// Equivalent to: `ip addr add $addr dev $link`
+func (h *Handle) AddrAdd(link Link, addr *Addr) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return h.addrHandle(link, addr, req)
+}
+
+// AddrReplace will replace (or, if not present, add) an IP address on a link device.
+// Equivalent to: `ip addr replace $addr dev $link`
+func AddrReplace(link Link, addr *Addr) error {
+ return pkgHandle.AddrReplace(link, addr)
+}
+
+// AddrReplace will replace (or, if not present, add) an IP address on a link device.
+// Equivalent to: `ip addr replace $addr dev $link`
+func (h *Handle) AddrReplace(link Link, addr *Addr) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE|syscall.NLM_F_ACK)
+ return h.addrHandle(link, addr, req)
+}
+
+// AddrDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func AddrDel(link Link, addr *Addr) error {
+ return pkgHandle.AddrDel(link, addr)
+}
+
+// AddrDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func (h *Handle) AddrDel(link Link, addr *Addr) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK)
+ return h.addrHandle(link, addr, req)
+}
+
+func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
+ base := link.Attrs()
+ if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) {
+ return fmt.Errorf("label must begin with interface name")
+ }
+ h.ensureIndex(base)
+
+ family := nl.GetIPFamily(addr.IP)
+
+ msg := nl.NewIfAddrmsg(family)
+ msg.Index = uint32(base.Index)
+ msg.Scope = uint8(addr.Scope)
+ prefixlen, _ := addr.Mask.Size()
+ msg.Prefixlen = uint8(prefixlen)
+ req.AddData(msg)
+
+ var localAddrData []byte
+ if family == FAMILY_V4 {
+ localAddrData = addr.IP.To4()
+ } else {
+ localAddrData = addr.IP.To16()
+ }
+
+ localData := nl.NewRtAttr(syscall.IFA_LOCAL, localAddrData)
+ req.AddData(localData)
+ var peerAddrData []byte
+ if addr.Peer != nil {
+ if family == FAMILY_V4 {
+ peerAddrData = addr.Peer.IP.To4()
+ } else {
+ peerAddrData = addr.Peer.IP.To16()
+ }
+ } else {
+ peerAddrData = localAddrData
+ }
+
+ addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, peerAddrData)
+ req.AddData(addressData)
+
+ if addr.Flags != 0 {
+ if addr.Flags <= 0xff {
+ msg.IfAddrmsg.Flags = uint8(addr.Flags)
+ } else {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(addr.Flags))
+ flagsData := nl.NewRtAttr(IFA_FLAGS, b)
+ req.AddData(flagsData)
+ }
+ }
+
+ if addr.Broadcast != nil {
+ req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast))
+ }
+
+ if addr.Label != "" {
+ labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label))
+ req.AddData(labelData)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// AddrList gets a list of IP addresses in the system.
+// Equivalent to: `ip addr show`.
+// The list can be filtered by link and ip family.
+func AddrList(link Link, family int) ([]Addr, error) {
+ return pkgHandle.AddrList(link, family)
+}
+
+// AddrList gets a list of IP addresses in the system.
+// Equivalent to: `ip addr show`.
+// The list can be filtered by link and ip family.
+func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR)
+ if err != nil {
+ return nil, err
+ }
+
+ indexFilter := 0
+ if link != nil {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ indexFilter = base.Index
+ }
+
+ var res []Addr
+ for _, m := range msgs {
+ addr, msgFamily, ifindex, err := parseAddr(m)
+ if err != nil {
+ return res, err
+ }
+
+ if link != nil && ifindex != indexFilter {
+ // Ignore messages from other interfaces
+ continue
+ }
+
+ if family != FAMILY_ALL && msgFamily != family {
+ continue
+ }
+
+ res = append(res, addr)
+ }
+
+ return res, nil
+}
+
+func parseAddr(m []byte) (addr Addr, family, index int, err error) {
+ msg := nl.DeserializeIfAddrmsg(m)
+
+ family = -1
+ index = -1
+
+ attrs, err1 := nl.ParseRouteAttr(m[msg.Len():])
+ if err1 != nil {
+ err = err1
+ return
+ }
+
+ family = int(msg.Family)
+ index = int(msg.Index)
+
+ var local, dst *net.IPNet
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.IFA_ADDRESS:
+ dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ addr.Peer = dst
+ case syscall.IFA_LOCAL:
+ local = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ addr.IPNet = local
+ case syscall.IFA_BROADCAST:
+ addr.Broadcast = attr.Value
+ case syscall.IFA_LABEL:
+ addr.Label = string(attr.Value[:len(attr.Value)-1])
+ case IFA_FLAGS:
+ addr.Flags = int(native.Uint32(attr.Value[0:4]))
+ case nl.IFA_CACHEINFO:
+ ci := nl.DeserializeIfaCacheInfo(attr.Value)
+ addr.PreferedLft = int(ci.IfaPrefered)
+ addr.ValidLft = int(ci.IfaValid)
+ }
+ }
+
+ // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
+ if local != nil {
+ addr.IPNet = local
+ } else {
+ addr.IPNet = dst
+ }
+ addr.Scope = int(msg.Scope)
+
+ return
+}
+
+type AddrUpdate struct {
+ LinkAddress net.IPNet
+ LinkIndex int
+ Flags int
+ Scope int
+ PreferedLft int
+ ValidLft int
+ NewAddr bool // true=added false=deleted
+}
+
+// AddrSubscribe takes a chan down which notifications will be sent
+// when addresses change. Close the 'done' chan to stop the subscription.
+func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
+ return addrSubscribe(netns.None(), netns.None(), ch, done)
+}
+
+// AddrSubscribeAt works like AddrSubscribe plus it allows the caller
+// to choose the network namespace in which to subscribe (ns).
+func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
+ return addrSubscribe(ns, netns.None(), ch, done)
+}
+
+func addrSubscribe(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
+ s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
+ if err != nil {
+ return err
+ }
+ if done != nil {
+ go func() {
+ <-done
+ s.Close()
+ }()
+ }
+ go func() {
+ defer close(ch)
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ log.Printf("netlink.AddrSubscribe: Receive() error: %v", err)
+ return
+ }
+ for _, m := range msgs {
+ msgType := m.Header.Type
+ if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR {
+ log.Printf("netlink.AddrSubscribe: bad message type: %d", msgType)
+ continue
+ }
+
+ addr, _, ifindex, err := parseAddr(m.Data)
+ if err != nil {
+ log.Printf("netlink.AddrSubscribe: could not parse address: %v", err)
+ continue
+ }
+
+ ch <- AddrUpdate{LinkAddress: *addr.IPNet,
+ LinkIndex: ifindex,
+ NewAddr: msgType == syscall.RTM_NEWADDR,
+ Flags: addr.Flags,
+ Scope: addr.Scope,
+ PreferedLft: addr.PreferedLft,
+ ValidLft: addr.ValidLft}
+ }
+ }
+ }()
+
+ return nil
+}
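+
+// Example (illustrative sketch, from a client package): subscribing to
+// address updates until 'done' is closed.
+//
+//  ch := make(chan netlink.AddrUpdate)
+//  done := make(chan struct{})
+//  if err := netlink.AddrSubscribe(ch, done); err != nil {
+//   log.Fatal(err)
+//  }
+//  for update := range ch {
+//   log.Printf("addr %s new=%v", update.LinkAddress.String(), update.NewAddr)
+//  }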
diff --git a/vendor/github.com/vishvananda/netlink/bpf_linux.go b/vendor/github.com/vishvananda/netlink/bpf_linux.go
new file mode 100644
index 000000000..533743987
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/bpf_linux.go
@@ -0,0 +1,62 @@
+package netlink
+
+/*
+#include <asm/types.h>
+#include <asm/unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+
+static int load_simple_bpf(int prog_type, int ret) {
+#ifdef __NR_bpf
+ // { return ret; }
+ __u64 __attribute__((aligned(8))) insns[] = {
+ 0x00000000000000b7ull | ((__u64)ret<<32),
+ 0x0000000000000095ull,
+ };
+ __u8 __attribute__((aligned(8))) license[] = "ASL2";
+ // Copied from a header file since libc is notoriously slow to update.
+ // The call will succeed or fail and that will be our indication on
+ // whether or not it is supported.
+ struct {
+ __u32 prog_type;
+ __u32 insn_cnt;
+ __u64 insns;
+ __u64 license;
+ __u32 log_level;
+ __u32 log_size;
+ __u64 log_buf;
+ __u32 kern_version;
+ } __attribute__((aligned(8))) attr = {
+ .prog_type = prog_type,
+ .insn_cnt = 2,
+ .insns = (uintptr_t)&insns,
+ .license = (uintptr_t)&license,
+ };
+ return syscall(__NR_bpf, 5, &attr, sizeof(attr));
+#else
+ errno = EINVAL;
+ return -1;
+#endif
+}
+*/
+import "C"
+
+type BpfProgType C.int
+
+const (
+ BPF_PROG_TYPE_UNSPEC BpfProgType = iota
+ BPF_PROG_TYPE_SOCKET_FILTER
+ BPF_PROG_TYPE_KPROBE
+ BPF_PROG_TYPE_SCHED_CLS
+ BPF_PROG_TYPE_SCHED_ACT
+ BPF_PROG_TYPE_TRACEPOINT
+ BPF_PROG_TYPE_XDP
+)
+
+// loadSimpleBpf loads a trivial bpf program for testing purposes
+func loadSimpleBpf(progType BpfProgType, ret int) (int, error) {
+ fd, err := C.load_simple_bpf(C.int(progType), C.int(ret))
+ return int(fd), err
+}
diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go
new file mode 100644
index 000000000..a65d6a131
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go
@@ -0,0 +1,115 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// BridgeVlanList gets a map of device id to bridge vlan infos.
+// Equivalent to: `bridge vlan show`
+func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+ return pkgHandle.BridgeVlanList()
+}
+
+// BridgeVlanList gets a map of device id to bridge vlan infos.
+// Equivalent to: `bridge vlan show`
+func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN))))
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+ if err != nil {
+ return nil, err
+ }
+ ret := make(map[int32][]*nl.BridgeVlanInfo)
+ for _, m := range msgs {
+ msg := nl.DeserializeIfInfomsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.IFLA_AF_SPEC:
+ // nested attribute
+ nestAttrs, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse nested attr %v", err)
+ }
+ for _, nestAttr := range nestAttrs {
+ switch nestAttr.Attr.Type {
+ case nl.IFLA_BRIDGE_VLAN_INFO:
+ vlanInfo := nl.DeserializeBridgeVlanInfo(nestAttr.Value)
+ ret[msg.Index] = append(ret[msg.Index], vlanInfo)
+ }
+ }
+ }
+ }
+ }
+ return ret, nil
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanAdd(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel deletes an existing vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanDel(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel deletes an existing vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master)
+}
+
+func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ br := nl.NewRtAttr(nl.IFLA_AF_SPEC, nil)
+ var flags uint16
+ if self {
+ flags |= nl.BRIDGE_FLAGS_SELF
+ }
+ if master {
+ flags |= nl.BRIDGE_FLAGS_MASTER
+ }
+ if flags > 0 {
+ nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags))
+ }
+ vlanInfo := &nl.BridgeVlanInfo{Vid: vid}
+ if pvid {
+ vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_PVID
+ }
+ if untagged {
+ vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED
+ }
+ nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+ req.AddData(br)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return err
+ }
+ return nil
+}
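+
+// Example (illustrative sketch, from a client package): tag vid 10 as
+// pvid/untagged on a bridge port, roughly
+// `bridge vlan add dev eth1 vid 10 pvid untagged master`:
+//
+//  eth1, _ := netlink.LinkByName("eth1")
+//  err := netlink.BridgeVlanAdd(eth1, 10, true, true, false, true)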
diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go
new file mode 100644
index 000000000..8ee13af48
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/class.go
@@ -0,0 +1,78 @@
+package netlink
+
+import (
+ "fmt"
+)
+
+type Class interface {
+ Attrs() *ClassAttrs
+ Type() string
+}
+
+// ClassAttrs represents a netlink class. A class is associated with a link,
+// has a handle and a parent. The root class of a device should have a
+// parent == HANDLE_ROOT.
+type ClassAttrs struct {
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Leaf uint32
+}
+
+func (q ClassAttrs) String() string {
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf)
+}
+
+type HtbClassAttrs struct {
+ // TODO handle all attributes
+ Rate uint64
+ Ceil uint64
+ Buffer uint32
+ Cbuffer uint32
+ Quantum uint32
+ Level uint32
+ Prio uint32
+}
+
+func (q HtbClassAttrs) String() string {
+ return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer)
+}
+
+// HtbClass represents an Htb class
+type HtbClass struct {
+ ClassAttrs
+ Rate uint64
+ Ceil uint64
+ Buffer uint32
+ Cbuffer uint32
+ Quantum uint32
+ Level uint32
+ Prio uint32
+}
+
+func (q HtbClass) String() string {
+ return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer)
+}
+
+func (q *HtbClass) Attrs() *ClassAttrs {
+ return &q.ClassAttrs
+}
+
+func (q *HtbClass) Type() string {
+ return "htb"
+}
+
+// GenericClass classes represent types that are not currently understood
+// by this netlink library.
+type GenericClass struct {
+ ClassAttrs
+ ClassType string
+}
+
+func (class *GenericClass) Attrs() *ClassAttrs {
+ return &class.ClassAttrs
+}
+
+func (class *GenericClass) Type() string {
+ return class.ClassType
+}
diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go
new file mode 100644
index 000000000..91cd3883d
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/class_linux.go
@@ -0,0 +1,254 @@
+package netlink
+
+import (
+ "errors"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// NewHtbClass builds an HTB class from the given attributes.
+// NOTE: this function is here because it uses other Linux-specific functions.
+func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass {
+ mtu := 1600
+ rate := cattrs.Rate / 8
+ ceil := cattrs.Ceil / 8
+ buffer := cattrs.Buffer
+ cbuffer := cattrs.Cbuffer
+
+ if ceil == 0 {
+ ceil = rate
+ }
+
+ if buffer == 0 {
+ buffer = uint32(float64(rate)/Hz() + float64(mtu))
+ }
+ buffer = uint32(Xmittime(rate, buffer))
+
+ if cbuffer == 0 {
+ cbuffer = uint32(float64(ceil)/Hz() + float64(mtu))
+ }
+ cbuffer = uint32(Xmittime(ceil, cbuffer))
+
+ return &HtbClass{
+ ClassAttrs: attrs,
+ Rate: rate,
+ Ceil: ceil,
+ Buffer: buffer,
+ Cbuffer: cbuffer,
+ Quantum: 10,
+ Level: 0,
+ Prio: 0,
+ }
+}
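+
+// Example (illustrative sketch, from a client package): build and install an
+// HTB class with a 128 kbit/s rate; the handle values are arbitrary.
+//
+//  attrs := netlink.ClassAttrs{
+//   LinkIndex: link.Attrs().Index,
+//   Parent:    netlink.MakeHandle(1, 0),
+//   Handle:    netlink.MakeHandle(1, 2),
+//  }
+//  htb := netlink.NewHtbClass(attrs, netlink.HtbClassAttrs{Rate: 128000})
+//  err := netlink.ClassAdd(htb)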
+
+// ClassDel will delete a class from the system.
+// Equivalent to: `tc class del $class`
+func ClassDel(class Class) error {
+ return pkgHandle.ClassDel(class)
+}
+
+// ClassDel will delete a class from the system.
+// Equivalent to: `tc class del $class`
+func (h *Handle) ClassDel(class Class) error {
+ return h.classModify(syscall.RTM_DELTCLASS, 0, class)
+}
+
+// ClassChange will change a class in place
+// Equivalent to: `tc class change $class`
+// The parent and handle MUST NOT be changed.
+func ClassChange(class Class) error {
+ return pkgHandle.ClassChange(class)
+}
+
+// ClassChange will change a class in place
+// Equivalent to: `tc class change $class`
+// The parent and handle MUST NOT be changed.
+func (h *Handle) ClassChange(class Class) error {
+ return h.classModify(syscall.RTM_NEWTCLASS, 0, class)
+}
+
+// ClassReplace will replace a class in the system.
+// Equivalent to: `tc class replace $class`
+// The handle MAY be changed.
+// If a class already exists with this parent/handle pair, the class is changed.
+// If a class does not already exist with this parent/handle, a new class is created.
+func ClassReplace(class Class) error {
+ return pkgHandle.ClassReplace(class)
+}
+
+// ClassReplace will replace a class in the system.
+// Equivalent to: `tc class replace $class`
+// The handle MAY be changed.
+// If a class already exists with this parent/handle pair, the class is changed.
+// If a class does not already exist with this parent/handle, a new class is created.
+func (h *Handle) ClassReplace(class Class) error {
+ return h.classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class)
+}
+
+// ClassAdd will add a class to the system.
+// Equivalent to: `tc class add $class`
+func ClassAdd(class Class) error {
+ return pkgHandle.ClassAdd(class)
+}
+
+// ClassAdd will add a class to the system.
+// Equivalent to: `tc class add $class`
+func (h *Handle) ClassAdd(class Class) error {
+ return h.classModify(
+ syscall.RTM_NEWTCLASS,
+ syscall.NLM_F_CREATE|syscall.NLM_F_EXCL,
+ class,
+ )
+}
+
+func (h *Handle) classModify(cmd, flags int, class Class) error {
+ req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
+ base := class.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ }
+ req.AddData(msg)
+
+ if cmd != syscall.RTM_DELTCLASS {
+ if err := classPayload(req, class); err != nil {
+ return err
+ }
+ }
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func classPayload(req *nl.NetlinkRequest, class Class) error {
+ req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(class.Type())))
+
+ options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
+ if htb, ok := class.(*HtbClass); ok {
+ opt := nl.TcHtbCopt{}
+ opt.Buffer = htb.Buffer
+ opt.Cbuffer = htb.Cbuffer
+ opt.Quantum = htb.Quantum
+ opt.Level = htb.Level
+ opt.Prio = htb.Prio
+ // TODO: Handle Debug properly. For now default to 0
+ /* Calculate {R,C}Tab and set Rate and Ceil */
+ cellLog := -1
+ ccellLog := -1
+ linklayer := nl.LINKLAYER_ETHERNET
+ mtu := 1600
+ var rtab [256]uint32
+ var ctab [256]uint32
+ tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)}
+ if CalcRtable(&tcrate, rtab, cellLog, uint32(mtu), linklayer) < 0 {
+ return errors.New("HTB: failed to calculate rate table")
+ }
+ opt.Rate = tcrate
+ tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)}
+ if CalcRtable(&tcceil, ctab, ccellLog, uint32(mtu), linklayer) < 0 {
+ return errors.New("HTB: failed to calculate ceil rate table")
+ }
+ opt.Ceil = tcceil
+ nl.NewRtAttrChild(options, nl.TCA_HTB_PARMS, opt.Serialize())
+ nl.NewRtAttrChild(options, nl.TCA_HTB_RTAB, SerializeRtab(rtab))
+ nl.NewRtAttrChild(options, nl.TCA_HTB_CTAB, SerializeRtab(ctab))
+ }
+ req.AddData(options)
+ return nil
+}
+
+// ClassList gets a list of classes in the system.
+// Equivalent to: `tc class show`.
+// Generally returns nothing if link and parent are not specified.
+func ClassList(link Link, parent uint32) ([]Class, error) {
+ return pkgHandle.ClassList(link, parent)
+}
+
+// ClassList gets a list of classes in the system.
+// Equivalent to: `tc class show`.
+// Generally returns nothing if link and parent are not specified.
+func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP)
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Parent: parent,
+ }
+ if link != nil {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ msg.Ifindex = int32(base.Index)
+ }
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTCLASS)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Class
+ for _, m := range msgs {
+ msg := nl.DeserializeTcMsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ base := ClassAttrs{
+ LinkIndex: int(msg.Ifindex),
+ Handle: msg.Handle,
+ Parent: msg.Parent,
+ }
+
+ var class Class
+ classType := ""
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.TCA_KIND:
+ classType = string(attr.Value[:len(attr.Value)-1])
+ switch classType {
+ case "htb":
+ class = &HtbClass{}
+ default:
+ class = &GenericClass{ClassType: classType}
+ }
+ case nl.TCA_OPTIONS:
+ switch classType {
+ case "htb":
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ _, err = parseHtbClassData(class, data)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+ *class.Attrs() = base
+ res = append(res, class)
+ }
+
+ return res, nil
+}
+
+func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) {
+ htb := class.(*HtbClass)
+ detailed := false
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_HTB_PARMS:
+ opt := nl.DeserializeTcHtbCopt(datum.Value)
+ htb.Rate = uint64(opt.Rate.Rate)
+ htb.Ceil = uint64(opt.Ceil.Rate)
+ htb.Buffer = opt.Buffer
+ htb.Cbuffer = opt.Cbuffer
+ htb.Quantum = opt.Quantum
+ htb.Level = opt.Level
+ htb.Prio = opt.Prio
+ }
+ }
+ return detailed, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
new file mode 100644
index 000000000..ecf044565
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
@@ -0,0 +1,371 @@
+package netlink
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// ConntrackTableType Conntrack table for the netlink operation
+type ConntrackTableType uint8
+
+const (
+ // ConntrackTable Conntrack table
+ // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK 1
+ ConntrackTable = 1
+ // ConntrackExpectTable Conntrack expect table
+ // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2
+ ConntrackExpectTable = 2
+)
+const (
+ // Layer 4 protocol numbers, used when parsing the mark
+ TCP_PROTO = 6
+ UDP_PROTO = 17
+)
+const (
+ // backward compatibility with golang 1.6 which does not have io.SeekCurrent
+ seekCurrent = 1
+)
+
+// InetFamily Family type
+type InetFamily uint8
+
+// -L [table] [options] List conntrack or expectation table
+// -G [table] parameters Get conntrack or expectation
+
+// -I [table] parameters Create a conntrack or expectation
+// -U [table] parameters Update a conntrack
+// -E [table] [options] Show events
+
+// -C [table] Show counter
+// -S Show statistics
+
+// ConntrackTableList returns the flow list of a table of a specific family
+// conntrack -L [table] [options] List conntrack or expectation table
+func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+ return pkgHandle.ConntrackTableList(table, family)
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func ConntrackTableFlush(table ConntrackTableType) error {
+ return pkgHandle.ConntrackTableFlush(table)
+}
+
+// ConntrackDeleteFilter deletes entries in the specified table that match the filter
+// conntrack -D [table] parameters Delete conntrack or expectation
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
+ return pkgHandle.ConntrackDeleteFilter(table, family, filter)
+}
+
+// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
+// conntrack -L [table] [options] List conntrack or expectation table
+func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+ res, err := h.dumpConntrackTable(table, family)
+ if err != nil {
+ return nil, err
+ }
+
+ // Deserialize all the flows
+ var result []*ConntrackFlow
+ for _, dataRaw := range res {
+ result = append(result, parseRawData(dataRaw))
+ }
+
+ return result, nil
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
+ req := h.newConntrackRequest(table, syscall.AF_INET, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK)
+ _, err := req.Execute(syscall.NETLINK_NETFILTER, 0)
+ return err
+}
+
+// ConntrackDeleteFilter deletes entries in the specified table that match the filter, using the netlink handle passed
+// conntrack -D [table] parameters Delete conntrack or expectation
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
+ res, err := h.dumpConntrackTable(table, family)
+ if err != nil {
+ return 0, err
+ }
+
+ var matched uint
+ for _, dataRaw := range res {
+ flow := parseRawData(dataRaw)
+ if match := filter.MatchConntrackFlow(flow); match {
+ req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK)
+ // skip the first 4 bytes (the netfilter header); newConntrackRequest already adds it
+ req2.AddRawData(dataRaw[4:])
+ req2.Execute(syscall.NETLINK_NETFILTER, 0)
+ matched++
+ }
+ }
+
+ return matched, nil
+}
+
+func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily, operation, flags int) *nl.NetlinkRequest {
+ // Create the Netlink request object
+ req := h.newNetlinkRequest((int(table)<<8)|operation, flags)
+ // Add the netfilter header
+ msg := &nl.Nfgenmsg{
+ NfgenFamily: uint8(family),
+ Version: nl.NFNETLINK_V0,
+ ResId: 0,
+ }
+ req.AddData(msg)
+ return req
+}
+
+func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) {
+ req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, syscall.NLM_F_DUMP)
+ return req.Execute(syscall.NETLINK_NETFILTER, 0)
+}
+
+// The full conntrack flow structure is very complicated and can be found in the file:
+// http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h
+// For the time being, the structure below allows us to parse and extract the basic information of a flow.
+type ipTuple struct {
+ SrcIP net.IP
+ DstIP net.IP
+ Protocol uint8
+ SrcPort uint16
+ DstPort uint16
+}
+
+type ConntrackFlow struct {
+ FamilyType uint8
+ Forward ipTuple
+ Reverse ipTuple
+ Mark uint32
+}
+
+func (s *ConntrackFlow) String() string {
+ // conntrack cmd output:
+ // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 mark=0
+ return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d mark=%d",
+ nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol,
+ s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort,
+ s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Mark)
+}
+
+// parseIpTuple parses the IP tuple structure.
+// The message structure is the following:
+// <len, [CTA_IP_V4_SRC|CTA_IP_V6_SRC], 16 bytes for the IP>
+// <len, [CTA_IP_V4_DST|CTA_IP_V6_DST], 16 bytes for the IP>
+// <len, NLA_F_NESTED|nl.CTA_TUPLE_PROTO, 1 byte for the protocol, 3 bytes of padding>
+// <len, CTA_PROTO_SRC_PORT, 2 bytes for the source port, 2 bytes of padding>
+// <len, CTA_PROTO_DST_PORT, 2 bytes for the destination port, 2 bytes of padding>
+func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 {
+ for i := 0; i < 2; i++ {
+ _, t, _, v := parseNfAttrTLV(reader)
+ switch t {
+ case nl.CTA_IP_V4_SRC, nl.CTA_IP_V6_SRC:
+ tpl.SrcIP = v
+ case nl.CTA_IP_V4_DST, nl.CTA_IP_V6_DST:
+ tpl.DstIP = v
+ }
+ }
+ // Skip the next 4 bytes nl.NLA_F_NESTED|nl.CTA_TUPLE_PROTO
+ reader.Seek(4, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_PROTO_NUM {
+ tpl.Protocol = uint8(v[0])
+ }
+ // Skip 3 bytes of padding
+ reader.Seek(3, seekCurrent)
+ for i := 0; i < 2; i++ {
+ _, t, _ := parseNfAttrTL(reader)
+ switch t {
+ case nl.CTA_PROTO_SRC_PORT:
+ parseBERaw16(reader, &tpl.SrcPort)
+ case nl.CTA_PROTO_DST_PORT:
+ parseBERaw16(reader, &tpl.DstPort)
+ }
+ // Skip 2 bytes of padding
+ reader.Seek(2, seekCurrent)
+ }
+ return tpl.Protocol
+}
+
+func parseNfAttrTLV(r *bytes.Reader) (isNested bool, attrType, len uint16, value []byte) {
+ isNested, attrType, len = parseNfAttrTL(r)
+
+ value = make([]byte, len)
+ binary.Read(r, binary.BigEndian, &value)
+ return isNested, attrType, len, value
+}
+
+func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) {
+ binary.Read(r, nl.NativeEndian(), &len)
+ len -= nl.SizeofNfattr
+
+ binary.Read(r, nl.NativeEndian(), &attrType)
+ isNested = (attrType & nl.NLA_F_NESTED) == nl.NLA_F_NESTED
+ attrType = attrType & (nl.NLA_F_NESTED - 1)
+
+ return isNested, attrType, len
+}
+
+func parseBERaw16(r *bytes.Reader, v *uint16) {
+ binary.Read(r, binary.BigEndian, v)
+}
+
+func parseRawData(data []byte) *ConntrackFlow {
+ s := &ConntrackFlow{}
+ var proto uint8
+ // First there is the Nfgenmsg header
+ // consume only the family field
+ reader := bytes.NewReader(data)
+ binary.Read(reader, nl.NativeEndian(), &s.FamilyType)
+
+ // skip rest of the Netfilter header
+ reader.Seek(3, seekCurrent)
+ // The message structure is the following:
+ // <len, NLA_F_NESTED|CTA_TUPLE_ORIG> 4 bytes
+ // <len, NLA_F_NESTED|CTA_TUPLE_IP> 4 bytes
+ // flow information of the forward flow
+ // <len, NLA_F_NESTED|CTA_TUPLE_REPLY> 4 bytes
+ // <len, NLA_F_NESTED|CTA_TUPLE_IP> 4 bytes
+ // flow information of the reverse flow
+ for reader.Len() > 0 {
+ nested, t, l := parseNfAttrTL(reader)
+ if nested && t == nl.CTA_TUPLE_ORIG {
+ if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+ proto = parseIpTuple(reader, &s.Forward)
+ }
+ } else if nested && t == nl.CTA_TUPLE_REPLY {
+ if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+ parseIpTuple(reader, &s.Reverse)
+
+ // Got all the useful information, stop parsing
+ break
+ } else {
+ // Header not recognized, skip it
+ reader.Seek(int64(l), seekCurrent)
+ }
+ }
+ }
+ if proto == TCP_PROTO {
+ reader.Seek(64, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_MARK {
+ s.Mark = uint32(v[3])
+ }
+ } else if proto == UDP_PROTO {
+ reader.Seek(16, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_MARK {
+ s.Mark = uint32(v[3])
+ }
+ }
+ return s
+}
+
+// Conntrack parameters and options:
+// -n, --src-nat ip source NAT ip
+// -g, --dst-nat ip destination NAT ip
+// -j, --any-nat ip source or destination NAT ip
+// -m, --mark mark Set mark
+// -c, --secmark secmark Set selinux secmark
+// -e, --event-mask eventmask Event mask, eg. NEW,DESTROY
+// -z, --zero Zero counters while listing
+// -o, --output type[,...] Output format, eg. xml
+// -l, --label label[,...] conntrack labels
+
+// Common parameters and options:
+// -s, --src, --orig-src ip Source address from original direction
+// -d, --dst, --orig-dst ip Destination address from original direction
+// -r, --reply-src ip Source address from reply direction
+// -q, --reply-dst ip Destination address from reply direction
+// -p, --protonum proto Layer 4 Protocol, eg. 'tcp'
+// -f, --family proto Layer 3 Protocol, eg. 'ipv6'
+// -t, --timeout timeout Set timeout
+// -u, --status status Set status, eg. ASSURED
+// -w, --zone value Set conntrack zone
+// --orig-zone value Set zone for original direction
+// --reply-zone value Set zone for reply direction
+// -b, --buffer-size Netlink socket buffer size
+// --mask-src ip Source mask address
+// --mask-dst ip Destination mask address
+
+// Filter types
+type ConntrackFilterType uint8
+
+const (
+ ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction
+ ConntrackOrigDstIP // -orig-dst ip Destination address from original direction
+ ConntrackNatSrcIP // -src-nat ip Source NAT ip
+ ConntrackNatDstIP // -dst-nat ip Destination NAT ip
+ ConntrackNatAnyIP // -any-nat ip Source or destination NAT ip
+)
+
+type CustomConntrackFilter interface {
+ // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches
+ // the filter or false otherwise
+ MatchConntrackFlow(flow *ConntrackFlow) bool
+}
+
+type ConntrackFilter struct {
+ ipFilter map[ConntrackFilterType]net.IP
+}
+
+// AddIP adds an IP to the conntrack filter
+func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error {
+ if f.ipFilter == nil {
+ f.ipFilter = make(map[ConntrackFilterType]net.IP)
+ }
+ if _, ok := f.ipFilter[tp]; ok {
+ return errors.New("Filter attribute already present")
+ }
+ f.ipFilter[tp] = ip
+ return nil
+}
+
+// MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter
+// false otherwise
+func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool {
+ if len(f.ipFilter) == 0 {
+ // an empty filter never matches
+ return false
+ }
+
+ match := true
+ // -orig-src ip Source address from original direction
+ if elem, found := f.ipFilter[ConntrackOrigSrcIP]; found {
+ match = match && elem.Equal(flow.Forward.SrcIP)
+ }
+
+ // -orig-dst ip Destination address from original direction
+ if elem, found := f.ipFilter[ConntrackOrigDstIP]; match && found {
+ match = match && elem.Equal(flow.Forward.DstIP)
+ }
+
+ // -src-nat ip Source NAT ip
+ if elem, found := f.ipFilter[ConntrackNatSrcIP]; match && found {
+ match = match && elem.Equal(flow.Reverse.SrcIP)
+ }
+
+ // -dst-nat ip Destination NAT ip
+ if elem, found := f.ipFilter[ConntrackNatDstIP]; match && found {
+ match = match && elem.Equal(flow.Reverse.DstIP)
+ }
+
+ // -any-nat ip Source or destination NAT ip
+ if elem, found := f.ipFilter[ConntrackNatAnyIP]; match && found {
+ match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP))
+ }
+
+ return match
+}
+
+var _ CustomConntrackFilter = (*ConntrackFilter)(nil)
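+
+// Example (illustrative sketch, from a client package): delete all IPv4
+// conntrack entries whose original source address is 10.0.0.1.
+//
+//  filter := &netlink.ConntrackFilter{}
+//  filter.AddIP(netlink.ConntrackOrigSrcIP, net.ParseIP("10.0.0.1"))
+//  n, err := netlink.ConntrackDeleteFilter(netlink.ConntrackTable, netlink.FAMILY_V4, filter)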
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
new file mode 100644
index 000000000..af7af799e
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
@@ -0,0 +1,53 @@
+// +build !linux
+
+package netlink
+
+// ConntrackTableType Conntrack table for the netlink operation
+type ConntrackTableType uint8
+
+// InetFamily Family type
+type InetFamily uint8
+
+// ConntrackFlow placeholder
+type ConntrackFlow struct{}
+
+// ConntrackFilter placeholder
+type ConntrackFilter struct{}
+
+// ConntrackTableList returns the flow list of a table of a specific family
+// conntrack -L [table] [options] List conntrack or expectation table
+func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+ return nil, ErrNotImplemented
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func ConntrackTableFlush(table ConntrackTableType) error {
+ return ErrNotImplemented
+}
+
+// ConntrackDeleteFilter deletes entries in the specified table that match the filter
+// conntrack -D [table] parameters Delete conntrack or expectation
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+ return 0, ErrNotImplemented
+}
+
+// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
+// conntrack -L [table] [options] List conntrack or expectation table
+func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+ return nil, ErrNotImplemented
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
+ return ErrNotImplemented
+}
+
+// ConntrackDeleteFilter deletes entries in the specified table that match the filter, using the netlink handle passed
+// conntrack -D [table] parameters Delete conntrack or expectation
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+ return 0, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
new file mode 100644
index 000000000..938b28b0b
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -0,0 +1,283 @@
+package netlink
+
+import (
+ "fmt"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+type Filter interface {
+ Attrs() *FilterAttrs
+ Type() string
+}
+
+// FilterAttrs represents a netlink filter. A filter is associated with a link,
+// has a handle and a parent. The root filter of a device should have a
+// parent == HANDLE_ROOT.
+type FilterAttrs struct {
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Priority uint16 // lower is higher priority
+ Protocol uint16 // syscall.ETH_P_*
+}
+
+func (q FilterAttrs) String() string {
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Priority: %d, Protocol: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Priority, q.Protocol)
+}
+
+type TcAct int32
+
+const (
+ TC_ACT_UNSPEC TcAct = -1
+ TC_ACT_OK TcAct = 0
+ TC_ACT_RECLASSIFY TcAct = 1
+ TC_ACT_SHOT TcAct = 2
+ TC_ACT_PIPE TcAct = 3
+ TC_ACT_STOLEN TcAct = 4
+ TC_ACT_QUEUED TcAct = 5
+ TC_ACT_REPEAT TcAct = 6
+ TC_ACT_REDIRECT TcAct = 7
+ TC_ACT_JUMP TcAct = 0x10000000
+)
+
+func (a TcAct) String() string {
+ switch a {
+ case TC_ACT_UNSPEC:
+ return "unspec"
+ case TC_ACT_OK:
+ return "ok"
+ case TC_ACT_RECLASSIFY:
+ return "reclassify"
+ case TC_ACT_SHOT:
+ return "shot"
+ case TC_ACT_PIPE:
+ return "pipe"
+ case TC_ACT_STOLEN:
+ return "stolen"
+ case TC_ACT_QUEUED:
+ return "queued"
+ case TC_ACT_REPEAT:
+ return "repeat"
+ case TC_ACT_REDIRECT:
+ return "redirect"
+ case TC_ACT_JUMP:
+ return "jump"
+ }
+ return fmt.Sprintf("0x%x", int32(a))
+}
+
+type TcPolAct int32
+
+const (
+ TC_POLICE_UNSPEC TcPolAct = TcPolAct(TC_ACT_UNSPEC)
+ TC_POLICE_OK TcPolAct = TcPolAct(TC_ACT_OK)
+ TC_POLICE_RECLASSIFY TcPolAct = TcPolAct(TC_ACT_RECLASSIFY)
+ TC_POLICE_SHOT TcPolAct = TcPolAct(TC_ACT_SHOT)
+ TC_POLICE_PIPE TcPolAct = TcPolAct(TC_ACT_PIPE)
+)
+
+func (a TcPolAct) String() string {
+ switch a {
+ case TC_POLICE_UNSPEC:
+ return "unspec"
+ case TC_POLICE_OK:
+ return "ok"
+ case TC_POLICE_RECLASSIFY:
+ return "reclassify"
+ case TC_POLICE_SHOT:
+ return "shot"
+ case TC_POLICE_PIPE:
+ return "pipe"
+ }
+ return fmt.Sprintf("0x%x", int32(a))
+}
+
+type ActionAttrs struct {
+ Index int
+ Capab int
+ Action TcAct
+ Refcnt int
+ Bindcnt int
+}
+
+func (q ActionAttrs) String() string {
+ return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt)
+}
+
+// Action represents an action in any supported filter.
+type Action interface {
+ Attrs() *ActionAttrs
+ Type() string
+}
+
+type GenericAction struct {
+ ActionAttrs
+}
+
+func (action *GenericAction) Type() string {
+ return "generic"
+}
+
+func (action *GenericAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+type BpfAction struct {
+ ActionAttrs
+ Fd int
+ Name string
+}
+
+func (action *BpfAction) Type() string {
+ return "bpf"
+}
+
+func (action *BpfAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+type MirredAct uint8
+
+func (a MirredAct) String() string {
+ switch a {
+ case TCA_EGRESS_REDIR:
+ return "egress redir"
+ case TCA_EGRESS_MIRROR:
+ return "egress mirror"
+ case TCA_INGRESS_REDIR:
+ return "ingress redir"
+ case TCA_INGRESS_MIRROR:
+ return "ingress mirror"
+ }
+ return "unknown"
+}
+
+const (
+ TCA_EGRESS_REDIR MirredAct = 1 /* packet redirect to EGRESS*/
+ TCA_EGRESS_MIRROR MirredAct = 2 /* mirror packet to EGRESS */
+ TCA_INGRESS_REDIR MirredAct = 3 /* packet redirect to INGRESS*/
+ TCA_INGRESS_MIRROR MirredAct = 4 /* mirror packet to INGRESS */
+)
+
+type MirredAction struct {
+ ActionAttrs
+ MirredAction MirredAct
+ Ifindex int
+}
+
+func (action *MirredAction) Type() string {
+ return "mirred"
+}
+
+func (action *MirredAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+func NewMirredAction(redirIndex int) *MirredAction {
+ return &MirredAction{
+ ActionAttrs: ActionAttrs{
+ Action: TC_ACT_STOLEN,
+ },
+ MirredAction: TCA_EGRESS_REDIR,
+ Ifindex: redirIndex,
+ }
+}
+
+// Constants used in TcU32Sel.Flags.
+const (
+ TC_U32_TERMINAL = nl.TC_U32_TERMINAL
+ TC_U32_OFFSET = nl.TC_U32_OFFSET
+ TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET
+ TC_U32_EAT = nl.TC_U32_EAT
+)
+
+// TcU32Sel is the Sel field of a U32 filter and contains multiple TcU32Key
+// entries. It is the frontend copy of nl.TcU32Sel and is serialized into the
+// canonical nl.TcU32Sel with the appropriate endianness.
+type TcU32Sel struct {
+ Flags uint8
+ Offshift uint8
+ Nkeys uint8
+ Pad uint8
+ Offmask uint16
+ Off uint16
+ Offoff int16
+ Hoff int16
+ Hmask uint32
+ Keys []TcU32Key
+}
+
+// TcU32Key is contained in the Sel of a U32 filter. It is the frontend copy
+// of nl.TcU32Key and is serialized into the canonical nl.TcU32Sel
+// with the appropriate endianness.
+type TcU32Key struct {
+ Mask uint32
+ Val uint32
+ Off int32
+ OffMask int32
+}
+
+// U32 filters on many packet-related properties
+type U32 struct {
+ FilterAttrs
+ ClassId uint32
+ RedirIndex int
+ Sel *TcU32Sel
+ Actions []Action
+}
+
+func (filter *U32) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+func (filter *U32) Type() string {
+ return "u32"
+}
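+
+// Example (illustrative sketch, from a client package): a match-all U32
+// filter that redirects traffic to another interface via a mirred action;
+// `src`, `dst`, and the handles are arbitrary.
+//
+//  u32 := &netlink.U32{
+//   FilterAttrs: netlink.FilterAttrs{
+//    LinkIndex: src.Attrs().Index,
+//    Parent:    netlink.MakeHandle(0xffff, 0),
+//    Protocol:  syscall.ETH_P_ALL,
+//   },
+//   Actions: []netlink.Action{netlink.NewMirredAction(dst.Attrs().Index)},
+//  }
+//  err := netlink.FilterAdd(u32)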
+
+type FilterFwAttrs struct {
+ ClassId uint32
+ InDev string
+ Mask uint32
+ Index uint32
+ Buffer uint32
+ Mtu uint32
+ Mpu uint16
+ Rate uint32
+ AvRate uint32
+ PeakRate uint32
+ Action TcPolAct
+ Overhead uint16
+ LinkLayer int
+}
+
+type BpfFilter struct {
+ FilterAttrs
+ ClassId uint32
+ Fd int
+ Name string
+ DirectAction bool
+}
+
+func (filter *BpfFilter) Type() string {
+ return "bpf"
+}
+
+func (filter *BpfFilter) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+// GenericFilter filters represent types that are not currently understood
+// by this netlink library.
+type GenericFilter struct {
+ FilterAttrs
+ FilterType string
+}
+
+func (filter *GenericFilter) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+func (filter *GenericFilter) Type() string {
+ return filter.FilterType
+}
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
new file mode 100644
index 000000000..dc0f90af8
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -0,0 +1,591 @@
+package netlink
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// Fw filter filters on firewall marks
+// NOTE: this is in filter_linux because it refers to nl.TcPolice which
+// is defined in nl/tc_linux.go
+type Fw struct {
+ FilterAttrs
+ ClassId uint32
+ // TODO remove nl type from interface
+ Police nl.TcPolice
+ InDev string
+ // TODO Action
+ Mask uint32
+ AvRate uint32
+ Rtab [256]uint32
+ Ptab [256]uint32
+}
+
+func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) {
+ var rtab [256]uint32
+ var ptab [256]uint32
+ rcellLog := -1
+ pcellLog := -1
+ avrate := fattrs.AvRate / 8
+ police := nl.TcPolice{}
+ police.Rate.Rate = fattrs.Rate / 8
+ police.PeakRate.Rate = fattrs.PeakRate / 8
+ buffer := fattrs.Buffer
+ linklayer := nl.LINKLAYER_ETHERNET
+
+ if fattrs.LinkLayer != nl.LINKLAYER_UNSPEC {
+ linklayer = fattrs.LinkLayer
+ }
+
+ police.Action = int32(fattrs.Action)
+ if police.Rate.Rate != 0 {
+ police.Rate.Mpu = fattrs.Mpu
+ police.Rate.Overhead = fattrs.Overhead
+ if CalcRtable(&police.Rate, rtab, rcellLog, fattrs.Mtu, linklayer) < 0 {
+ return nil, errors.New("TBF: failed to calculate rate table")
+ }
+ police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer)))
+ }
+ police.Mtu = fattrs.Mtu
+ if police.PeakRate.Rate != 0 {
+ police.PeakRate.Mpu = fattrs.Mpu
+ police.PeakRate.Overhead = fattrs.Overhead
+ if CalcRtable(&police.PeakRate, ptab, pcellLog, fattrs.Mtu, linklayer) < 0 {
+ return nil, errors.New("POLICE: failed to calculate peak rate table")
+ }
+ }
+
+ return &Fw{
+ FilterAttrs: attrs,
+ ClassId: fattrs.ClassId,
+ InDev: fattrs.InDev,
+ Mask: fattrs.Mask,
+ Police: police,
+ AvRate: avrate,
+ Rtab: rtab,
+ Ptab: ptab,
+ }, nil
+}
+
+func (filter *Fw) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+func (filter *Fw) Type() string {
+ return "fw"
+}
+
+// FilterDel will delete a filter from the system.
+// Equivalent to: `tc filter del $filter`
+func FilterDel(filter Filter) error {
+ return pkgHandle.FilterDel(filter)
+}
+
+// FilterDel will delete a filter from the system.
+// Equivalent to: `tc filter del $filter`
+func (h *Handle) FilterDel(filter Filter) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK)
+ base := filter.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)),
+ }
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// FilterAdd will add a filter to the system.
+// Equivalent to: `tc filter add $filter`
+func FilterAdd(filter Filter) error {
+ return pkgHandle.FilterAdd(filter)
+}
+
+// FilterAdd will add a filter to the system.
+// Equivalent to: `tc filter add $filter`
+func (h *Handle) FilterAdd(filter Filter) error {
+ native = nl.NativeEndian()
+ req := h.newNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ base := filter.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)),
+ }
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type())))
+
+ options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
+ if u32, ok := filter.(*U32); ok {
+ // Reinterpret TcU32Sel as nl.TcU32Sel in place, without copying.
+ sel := (*nl.TcU32Sel)(unsafe.Pointer(u32.Sel))
+ if sel == nil {
+ // match all
+ sel = &nl.TcU32Sel{
+ Nkeys: 1,
+ Flags: nl.TC_U32_TERMINAL,
+ }
+ sel.Keys = append(sel.Keys, nl.TcU32Key{})
+ }
+
+ if native != networkOrder {
+ // Copy TcU32Sel.
+ cSel := *sel
+ keys := make([]nl.TcU32Key, cap(sel.Keys))
+ copy(keys, sel.Keys)
+ cSel.Keys = keys
+ sel = &cSel
+
+ // Handle the endianness of attributes
+ sel.Offmask = native.Uint16(htons(sel.Offmask))
+ sel.Hmask = native.Uint32(htonl(sel.Hmask))
+ for i, key := range sel.Keys {
+ sel.Keys[i].Mask = native.Uint32(htonl(key.Mask))
+ sel.Keys[i].Val = native.Uint32(htonl(key.Val))
+ }
+ }
+ sel.Nkeys = uint8(len(sel.Keys))
+ nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize())
+ if u32.ClassId != 0 {
+ nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(u32.ClassId))
+ }
+ actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil)
+ // backwards compatibility
+ if u32.RedirIndex != 0 {
+ u32.Actions = append([]Action{NewMirredAction(u32.RedirIndex)}, u32.Actions...)
+ }
+ if err := EncodeActions(actionsAttr, u32.Actions); err != nil {
+ return err
+ }
+ } else if fw, ok := filter.(*Fw); ok {
+ if fw.Mask != 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, fw.Mask)
+ nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b)
+ }
+ if fw.InDev != "" {
+ nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(fw.InDev))
+ }
+ if (fw.Police != nl.TcPolice{}) {
+
+ police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil)
+ nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, fw.Police.Serialize())
+ if (fw.Police.Rate != nl.TcRateSpec{}) {
+ payload := SerializeRtab(fw.Rtab)
+ nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload)
+ }
+ if (fw.Police.PeakRate != nl.TcRateSpec{}) {
+ payload := SerializeRtab(fw.Ptab)
+ nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload)
+ }
+ }
+ if fw.ClassId != 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, fw.ClassId)
+ nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b)
+ }
+ } else if bpf, ok := filter.(*BpfFilter); ok {
+ var bpfFlags uint32
+ if bpf.ClassId != 0 {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(bpf.ClassId))
+ }
+ if bpf.Fd >= 0 {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(bpf.Fd))))
+ }
+ if bpf.Name != "" {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(bpf.Name))
+ }
+ if bpf.DirectAction {
+ bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT
+ }
+ nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags))
+ }
+
+ req.AddData(options)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// FilterList gets a list of filters in the system.
+// Equivalent to: `tc filter show`.
+// Generally returns nothing if link and parent are not specified.
+func FilterList(link Link, parent uint32) ([]Filter, error) {
+ return pkgHandle.FilterList(link, parent)
+}
+
+// FilterList gets a list of filters in the system.
+// Equivalent to: `tc filter show`.
+// Generally returns nothing if link and parent are not specified.
+func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP)
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Parent: parent,
+ }
+ if link != nil {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ msg.Ifindex = int32(base.Index)
+ }
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Filter
+ for _, m := range msgs {
+ msg := nl.DeserializeTcMsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ base := FilterAttrs{
+ LinkIndex: int(msg.Ifindex),
+ Handle: msg.Handle,
+ Parent: msg.Parent,
+ }
+ base.Priority, base.Protocol = MajorMinor(msg.Info)
+ base.Protocol = nl.Swap16(base.Protocol)
+
+ var filter Filter
+ filterType := ""
+ detailed := false
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.TCA_KIND:
+ filterType = string(attr.Value[:len(attr.Value)-1])
+ switch filterType {
+ case "u32":
+ filter = &U32{}
+ case "fw":
+ filter = &Fw{}
+ case "bpf":
+ filter = &BpfFilter{}
+ default:
+ filter = &GenericFilter{FilterType: filterType}
+ }
+ case nl.TCA_OPTIONS:
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ switch filterType {
+ case "u32":
+ detailed, err = parseU32Data(filter, data)
+ if err != nil {
+ return nil, err
+ }
+ case "fw":
+ detailed, err = parseFwData(filter, data)
+ if err != nil {
+ return nil, err
+ }
+ case "bpf":
+ detailed, err = parseBpfData(filter, data)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ detailed = true
+ }
+ }
+ }
+ // only return the detailed version of the filter
+ if detailed {
+ *filter.Attrs() = base
+ res = append(res, filter)
+ }
+ }
+
+ return res, nil
+}
+
+func toTcGen(attrs *ActionAttrs, tcgen *nl.TcGen) {
+ tcgen.Index = uint32(attrs.Index)
+ tcgen.Capab = uint32(attrs.Capab)
+ tcgen.Action = int32(attrs.Action)
+ tcgen.Refcnt = int32(attrs.Refcnt)
+ tcgen.Bindcnt = int32(attrs.Bindcnt)
+}
+
+func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) {
+ attrs.Index = int(tcgen.Index)
+ attrs.Capab = int(tcgen.Capab)
+ attrs.Action = TcAct(tcgen.Action)
+ attrs.Refcnt = int(tcgen.Refcnt)
+ attrs.Bindcnt = int(tcgen.Bindcnt)
+}
+
+func EncodeActions(attr *nl.RtAttr, actions []Action) error {
+ tabIndex := int(nl.TCA_ACT_TAB)
+
+ for _, action := range actions {
+ switch action := action.(type) {
+ default:
+ return fmt.Errorf("unknown action type %s", action.Type())
+ case *MirredAction:
+ table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ tabIndex++
+ nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred"))
+ aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ mirred := nl.TcMirred{
+ Eaction: int32(action.MirredAction),
+ Ifindex: uint32(action.Ifindex),
+ }
+ toTcGen(action.Attrs(), &mirred.TcGen)
+ nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mirred.Serialize())
+ case *BpfAction:
+ table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ tabIndex++
+ nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf"))
+ aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ gen := nl.TcGen{}
+ toTcGen(action.Attrs(), &gen)
+ nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_PARMS, gen.Serialize())
+ nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd)))
+ nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name))
+ case *GenericAction:
+ table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ tabIndex++
+ nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("gact"))
+ aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ gen := nl.TcGen{}
+ toTcGen(action.Attrs(), &gen)
+ nl.NewRtAttrChild(aopts, nl.TCA_GACT_PARMS, gen.Serialize())
+ }
+ }
+ return nil
+}
+
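[A minimal usage sketch of the path this encoding serves, under stated assumptions: "eth0" and "ifb0" exist and a qdisc is already installed at the parent handle. Setting RedirIndex makes FilterAdd prepend a MirredAction for backwards compatibility, which EncodeActions then serializes.]

    package main

    import (
        "log"
        "syscall"

        "github.com/vishvananda/netlink"
    )

    func main() {
        src, err := netlink.LinkByName("eth0") // assumed existing device
        if err != nil {
            log.Fatal(err)
        }
        dst, err := netlink.LinkByName("ifb0") // assumed redirect target
        if err != nil {
            log.Fatal(err)
        }
        filter := &netlink.U32{
            FilterAttrs: netlink.FilterAttrs{
                LinkIndex: src.Attrs().Index,
                Parent:    netlink.MakeHandle(0xffff, 0),
                Priority:  1,
                Protocol:  syscall.ETH_P_ALL,
            },
            // Sel left nil: FilterAdd installs a terminal match-all selector.
            RedirIndex: dst.Attrs().Index, // converted into a MirredAction by FilterAdd
        }
        if err := netlink.FilterAdd(filter); err != nil {
            log.Fatal(err)
        }
    }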
+func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
+ var actions []Action
+ for _, table := range tables {
+ var action Action
+ var actionType string
+ aattrs, err := nl.ParseRouteAttr(table.Value)
+ if err != nil {
+ return nil, err
+ }
+ nextattr:
+ for _, aattr := range aattrs {
+ switch aattr.Attr.Type {
+ case nl.TCA_KIND:
+ actionType = string(aattr.Value[:len(aattr.Value)-1])
+ // only parse if the action is mirred, bpf or gact
+ switch actionType {
+ case "mirred":
+ action = &MirredAction{}
+ case "bpf":
+ action = &BpfAction{}
+ case "gact":
+ action = &GenericAction{}
+ default:
+ break nextattr
+ }
+ case nl.TCA_OPTIONS:
+ adata, err := nl.ParseRouteAttr(aattr.Value)
+ if err != nil {
+ return nil, err
+ }
+ for _, adatum := range adata {
+ switch actionType {
+ case "mirred":
+ switch adatum.Attr.Type {
+ case nl.TCA_MIRRED_PARMS:
+ mirred := *nl.DeserializeTcMirred(adatum.Value)
+ toAttrs(&mirred.TcGen, action.Attrs())
+ action.(*MirredAction).ActionAttrs = ActionAttrs{}
+ action.(*MirredAction).Ifindex = int(mirred.Ifindex)
+ action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction)
+ }
+ case "bpf":
+ switch adatum.Attr.Type {
+ case nl.TCA_ACT_BPF_PARMS:
+ gen := *nl.DeserializeTcGen(adatum.Value)
+ toAttrs(&gen, action.Attrs())
+ case nl.TCA_ACT_BPF_FD:
+ action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4]))
+ case nl.TCA_ACT_BPF_NAME:
+ action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1])
+ }
+ case "gact":
+ switch adatum.Attr.Type {
+ case nl.TCA_GACT_PARMS:
+ gen := *nl.DeserializeTcGen(adatum.Value)
+ toAttrs(&gen, action.Attrs())
+ }
+ }
+ }
+ }
+ }
+ actions = append(actions, action)
+ }
+ return actions, nil
+}
+
+func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+ native = nl.NativeEndian()
+ u32 := filter.(*U32)
+ detailed := false
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_U32_SEL:
+ detailed = true
+ sel := nl.DeserializeTcU32Sel(datum.Value)
+ u32.Sel = (*TcU32Sel)(unsafe.Pointer(sel))
+ if native != networkOrder {
+ // Handle the endianness of attributes
+ u32.Sel.Offmask = native.Uint16(htons(sel.Offmask))
+ u32.Sel.Hmask = native.Uint32(htonl(sel.Hmask))
+ for i, key := range u32.Sel.Keys {
+ u32.Sel.Keys[i].Mask = native.Uint32(htonl(key.Mask))
+ u32.Sel.Keys[i].Val = native.Uint32(htonl(key.Val))
+ }
+ }
+ case nl.TCA_U32_ACT:
+ tables, err := nl.ParseRouteAttr(datum.Value)
+ if err != nil {
+ return detailed, err
+ }
+ u32.Actions, err = parseActions(tables)
+ if err != nil {
+ return detailed, err
+ }
+ for _, action := range u32.Actions {
+ if action, ok := action.(*MirredAction); ok {
+ u32.RedirIndex = int(action.Ifindex)
+ }
+ }
+ case nl.TCA_U32_CLASSID:
+ u32.ClassId = native.Uint32(datum.Value)
+ }
+ }
+ return detailed, nil
+}
+
+func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+ native = nl.NativeEndian()
+ fw := filter.(*Fw)
+ detailed := true
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_FW_MASK:
+ fw.Mask = native.Uint32(datum.Value[0:4])
+ case nl.TCA_FW_CLASSID:
+ fw.ClassId = native.Uint32(datum.Value[0:4])
+ case nl.TCA_FW_INDEV:
+ fw.InDev = string(datum.Value[:len(datum.Value)-1])
+ case nl.TCA_FW_POLICE:
+ adata, _ := nl.ParseRouteAttr(datum.Value)
+ for _, aattr := range adata {
+ switch aattr.Attr.Type {
+ case nl.TCA_POLICE_TBF:
+ fw.Police = *nl.DeserializeTcPolice(aattr.Value)
+ case nl.TCA_POLICE_RATE:
+ fw.Rtab = DeserializeRtab(aattr.Value)
+ case nl.TCA_POLICE_PEAKRATE:
+ fw.Ptab = DeserializeRtab(aattr.Value)
+ }
+ }
+ }
+ }
+ return detailed, nil
+}
+
+func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+ native = nl.NativeEndian()
+ bpf := filter.(*BpfFilter)
+ detailed := true
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_BPF_FD:
+ bpf.Fd = int(native.Uint32(datum.Value[0:4]))
+ case nl.TCA_BPF_NAME:
+ bpf.Name = string(datum.Value[:len(datum.Value)-1])
+ case nl.TCA_BPF_CLASSID:
+ bpf.ClassId = native.Uint32(datum.Value[0:4])
+ case nl.TCA_BPF_FLAGS:
+ flags := native.Uint32(datum.Value[0:4])
+ if (flags & nl.TCA_BPF_FLAG_ACT_DIRECT) != 0 {
+ bpf.DirectAction = true
+ }
+ }
+ }
+ return detailed, nil
+}
+
+func AlignToAtm(size uint) uint {
+ var linksize, cells int
+ cells = int(size / nl.ATM_CELL_PAYLOAD)
+ if (size % nl.ATM_CELL_PAYLOAD) > 0 {
+ cells++
+ }
+ linksize = cells * nl.ATM_CELL_SIZE
+ return uint(linksize)
+}
+
+func AdjustSize(sz uint, mpu uint, linklayer int) uint {
+ if sz < mpu {
+ sz = mpu
+ }
+ switch linklayer {
+ case nl.LINKLAYER_ATM:
+ return AlignToAtm(sz)
+ default:
+ return sz
+ }
+}
+
+func CalcRtable(rate *nl.TcRateSpec, rtab *[256]uint32, cellLog int, mtu uint32, linklayer int) int {
+ bps := rate.Rate
+ mpu := rate.Mpu
+ var sz uint
+ if mtu == 0 {
+ mtu = 2047
+ }
+ if cellLog < 0 {
+ cellLog = 0
+ for (mtu >> uint(cellLog)) > 255 {
+ cellLog++
+ }
+ }
+ for i := 0; i < 256; i++ {
+ sz = AdjustSize(uint((i+1)<<uint32(cellLog)), uint(mpu), linklayer)
+ rtab[i] = uint32(Xmittime(uint64(bps), uint32(sz)))
+ }
+ rate.CellAlign = -1
+ rate.CellLog = uint8(cellLog)
+ rate.Linklayer = uint8(linklayer & nl.TC_LINKLAYER_MASK)
+ return cellLog
+}
+
+func DeserializeRtab(b []byte) [256]uint32 {
+ var rtab [256]uint32
+ native := nl.NativeEndian()
+ r := bytes.NewReader(b)
+ _ = binary.Read(r, native, &rtab)
+ return rtab
+}
+
+func SerializeRtab(rtab [256]uint32) []byte {
+ native := nl.NativeEndian()
+ var w bytes.Buffer
+ _ = binary.Write(&w, native, rtab)
+ return w.Bytes()
+}
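[A minimal usage sketch of the fw classifier built by NewFw and installed by FilterAdd above. Assumptions: "eth0" exists with a qdisc at the parent handle, the FilterFwAttrs literal uses only fields that NewFw reads, and TC_POLICE_SHOT is the library's police "drop on exceed" constant.]

    package main

    import (
        "log"
        "syscall"

        "github.com/vishvananda/netlink"
    )

    func main() {
        link, err := netlink.LinkByName("eth0") // assumed existing device
        if err != nil {
            log.Fatal(err)
        }
        fw, err := netlink.NewFw(
            netlink.FilterAttrs{
                LinkIndex: link.Attrs().Index,
                Parent:    netlink.MakeHandle(0xffff, 0),
                Handle:    netlink.MakeHandle(0, 10), // match packets with fwmark 10
                Priority:  1,
                Protocol:  syscall.ETH_P_ALL,
            },
            netlink.FilterFwAttrs{
                ClassId: netlink.MakeHandle(1, 16),
                Rate:    1024 * 1024, // bits/s; NewFw stores bytes/s (Rate / 8)
                Buffer:  8192,        // used to derive the police burst via Xmittime
                Mtu:     1500,
                Action:  netlink.TC_POLICE_SHOT, // assumed constant: drop when rate is exceeded
            },
        )
        if err != nil {
            log.Fatal(err)
        }
        if err := netlink.FilterAdd(fw); err != nil {
            log.Fatal(err)
        }
    }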
diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go
new file mode 100644
index 000000000..a388a8700
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go
@@ -0,0 +1,167 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+type GenlOp struct {
+ ID uint32
+ Flags uint32
+}
+
+type GenlMulticastGroup struct {
+ ID uint32
+ Name string
+}
+
+type GenlFamily struct {
+ ID uint16
+ HdrSize uint32
+ Name string
+ Version uint32
+ MaxAttr uint32
+ Ops []GenlOp
+ Groups []GenlMulticastGroup
+}
+
+func parseOps(b []byte) ([]GenlOp, error) {
+ attrs, err := nl.ParseRouteAttr(b)
+ if err != nil {
+ return nil, err
+ }
+ ops := make([]GenlOp, 0, len(attrs))
+ for _, a := range attrs {
+ nattrs, err := nl.ParseRouteAttr(a.Value)
+ if err != nil {
+ return nil, err
+ }
+ var op GenlOp
+ for _, na := range nattrs {
+ switch na.Attr.Type {
+ case nl.GENL_CTRL_ATTR_OP_ID:
+ op.ID = native.Uint32(na.Value)
+ case nl.GENL_CTRL_ATTR_OP_FLAGS:
+ op.Flags = native.Uint32(na.Value)
+ }
+ }
+ ops = append(ops, op)
+ }
+ return ops, nil
+}
+
+func parseMulticastGroups(b []byte) ([]GenlMulticastGroup, error) {
+ attrs, err := nl.ParseRouteAttr(b)
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]GenlMulticastGroup, 0, len(attrs))
+ for _, a := range attrs {
+ nattrs, err := nl.ParseRouteAttr(a.Value)
+ if err != nil {
+ return nil, err
+ }
+ var g GenlMulticastGroup
+ for _, na := range nattrs {
+ switch na.Attr.Type {
+ case nl.GENL_CTRL_ATTR_MCAST_GRP_NAME:
+ g.Name = nl.BytesToString(na.Value)
+ case nl.GENL_CTRL_ATTR_MCAST_GRP_ID:
+ g.ID = native.Uint32(na.Value)
+ }
+ }
+ groups = append(groups, g)
+ }
+ return groups, nil
+}
+
+func (f *GenlFamily) parseAttributes(attrs []syscall.NetlinkRouteAttr) error {
+ for _, a := range attrs {
+ switch a.Attr.Type {
+ case nl.GENL_CTRL_ATTR_FAMILY_NAME:
+ f.Name = nl.BytesToString(a.Value)
+ case nl.GENL_CTRL_ATTR_FAMILY_ID:
+ f.ID = native.Uint16(a.Value)
+ case nl.GENL_CTRL_ATTR_VERSION:
+ f.Version = native.Uint32(a.Value)
+ case nl.GENL_CTRL_ATTR_HDRSIZE:
+ f.HdrSize = native.Uint32(a.Value)
+ case nl.GENL_CTRL_ATTR_MAXATTR:
+ f.MaxAttr = native.Uint32(a.Value)
+ case nl.GENL_CTRL_ATTR_OPS:
+ ops, err := parseOps(a.Value)
+ if err != nil {
+ return err
+ }
+ f.Ops = ops
+ case nl.GENL_CTRL_ATTR_MCAST_GROUPS:
+ groups, err := parseMulticastGroups(a.Value)
+ if err != nil {
+ return err
+ }
+ f.Groups = groups
+ }
+ }
+
+ return nil
+}
+
+func parseFamilies(msgs [][]byte) ([]*GenlFamily, error) {
+ families := make([]*GenlFamily, 0, len(msgs))
+ for _, m := range msgs {
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ family := &GenlFamily{}
+ if err := family.parseAttributes(attrs); err != nil {
+ return nil, err
+ }
+
+ families = append(families, family)
+ }
+ return families, nil
+}
+
+func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) {
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_CTRL_CMD_GETFAMILY,
+ Version: nl.GENL_CTRL_VERSION,
+ }
+ req := h.newNetlinkRequest(nl.GENL_ID_CTRL, syscall.NLM_F_DUMP)
+ req.AddData(msg)
+ msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ return parseFamilies(msgs)
+}
+
+func GenlFamilyList() ([]*GenlFamily, error) {
+ return pkgHandle.GenlFamilyList()
+}
+
+func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) {
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_CTRL_CMD_GETFAMILY,
+ Version: nl.GENL_CTRL_VERSION,
+ }
+ req := h.newNetlinkRequest(nl.GENL_ID_CTRL, 0)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.GENL_CTRL_ATTR_FAMILY_NAME, nl.ZeroTerminated(name)))
+ msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ families, err := parseFamilies(msgs)
+ if err != nil {
+ return nil, err
+ }
+ if len(families) != 1 {
+ return nil, fmt.Errorf("invalid response for GENL_CTRL_CMD_GETFAMILY")
+ }
+ return families[0], nil
+}
+
+func GenlFamilyGet(name string) (*GenlFamily, error) {
+ return pkgHandle.GenlFamilyGet(name)
+}
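[A minimal usage sketch of the helpers above, querying the generic netlink controller itself; "nlctrl" is the controller's own family name.]

    package main

    import (
        "fmt"
        "log"

        "github.com/vishvananda/netlink"
    )

    func main() {
        fam, err := netlink.GenlFamilyGet("nlctrl")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("family %s: id=%d version=%d mcast groups=%d\n",
            fam.Name, fam.ID, fam.Version, len(fam.Groups))
    }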
diff --git a/vendor/github.com/vishvananda/netlink/genetlink_unspecified.go b/vendor/github.com/vishvananda/netlink/genetlink_unspecified.go
new file mode 100644
index 000000000..0192b991e
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/genetlink_unspecified.go
@@ -0,0 +1,25 @@
+// +build !linux
+
+package netlink
+
+type GenlOp struct{}
+
+type GenlMulticastGroup struct{}
+
+type GenlFamily struct{}
+
+func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) {
+ return nil, ErrNotImplemented
+}
+
+func GenlFamilyList() ([]*GenlFamily, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) {
+ return nil, ErrNotImplemented
+}
+
+func GenlFamilyGet(name string) (*GenlFamily, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/gtp_linux.go b/vendor/github.com/vishvananda/netlink/gtp_linux.go
new file mode 100644
index 000000000..7331303ec
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/gtp_linux.go
@@ -0,0 +1,238 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+type PDP struct {
+ Version uint32
+ TID uint64
+ PeerAddress net.IP
+ MSAddress net.IP
+ Flow uint16
+ NetNSFD uint32
+ ITEI uint32
+ OTEI uint32
+}
+
+func (pdp *PDP) String() string {
+ elems := []string{}
+ elems = append(elems, fmt.Sprintf("Version: %d", pdp.Version))
+ if pdp.Version == 0 {
+ elems = append(elems, fmt.Sprintf("TID: %d", pdp.TID))
+ } else if pdp.Version == 1 {
+ elems = append(elems, fmt.Sprintf("TEI: %d/%d", pdp.ITEI, pdp.OTEI))
+ }
+ elems = append(elems, fmt.Sprintf("MS-Address: %s", pdp.MSAddress))
+ elems = append(elems, fmt.Sprintf("Peer-Address: %s", pdp.PeerAddress))
+ return fmt.Sprintf("{%s}", strings.Join(elems, " "))
+}
+
+func (p *PDP) parseAttributes(attrs []syscall.NetlinkRouteAttr) error {
+ for _, a := range attrs {
+ switch a.Attr.Type {
+ case nl.GENL_GTP_ATTR_VERSION:
+ p.Version = native.Uint32(a.Value)
+ case nl.GENL_GTP_ATTR_TID:
+ p.TID = native.Uint64(a.Value)
+ case nl.GENL_GTP_ATTR_PEER_ADDRESS:
+ p.PeerAddress = net.IP(a.Value)
+ case nl.GENL_GTP_ATTR_MS_ADDRESS:
+ p.MSAddress = net.IP(a.Value)
+ case nl.GENL_GTP_ATTR_FLOW:
+ p.Flow = native.Uint16(a.Value)
+ case nl.GENL_GTP_ATTR_NET_NS_FD:
+ p.NetNSFD = native.Uint32(a.Value)
+ case nl.GENL_GTP_ATTR_I_TEI:
+ p.ITEI = native.Uint32(a.Value)
+ case nl.GENL_GTP_ATTR_O_TEI:
+ p.OTEI = native.Uint32(a.Value)
+ }
+ }
+ return nil
+}
+
+func parsePDP(msgs [][]byte) ([]*PDP, error) {
+ pdps := make([]*PDP, 0, len(msgs))
+ for _, m := range msgs {
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ pdp := &PDP{}
+ if err := pdp.parseAttributes(attrs); err != nil {
+ return nil, err
+ }
+ pdps = append(pdps, pdp)
+ }
+ return pdps, nil
+}
+
+func (h *Handle) GTPPDPList() ([]*PDP, error) {
+ f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME)
+ if err != nil {
+ return nil, err
+ }
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_GTP_CMD_GETPDP,
+ Version: nl.GENL_GTP_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_DUMP)
+ req.AddData(msg)
+ msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ return parsePDP(msgs)
+}
+
+func GTPPDPList() ([]*PDP, error) {
+ return pkgHandle.GTPPDPList()
+}
+
+func gtpPDPGet(req *nl.NetlinkRequest) (*PDP, error) {
+ msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ pdps, err := parsePDP(msgs)
+ if err != nil {
+ return nil, err
+ }
+ if len(pdps) != 1 {
+ return nil, fmt.Errorf("invalid reqponse for GENL_GTP_CMD_GETPDP")
+ }
+ return pdps[0], nil
+}
+
+func (h *Handle) GTPPDPByTID(link Link, tid int) (*PDP, error) {
+ f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME)
+ if err != nil {
+ return nil, err
+ }
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_GTP_CMD_GETPDP,
+ Version: nl.GENL_GTP_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID), 0)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(0)))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index))))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_TID, nl.Uint64Attr(uint64(tid))))
+ return gtpPDPGet(req)
+}
+
+func GTPPDPByTID(link Link, tid int) (*PDP, error) {
+ return pkgHandle.GTPPDPByTID(link, tid)
+}
+
+func (h *Handle) GTPPDPByITEI(link Link, itei int) (*PDP, error) {
+ f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME)
+ if err != nil {
+ return nil, err
+ }
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_GTP_CMD_GETPDP,
+ Version: nl.GENL_GTP_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID), 0)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(1)))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index))))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_I_TEI, nl.Uint32Attr(uint32(itei))))
+ return gtpPDPGet(req)
+}
+
+func GTPPDPByITEI(link Link, itei int) (*PDP, error) {
+ return pkgHandle.GTPPDPByITEI(link, itei)
+}
+
+func (h *Handle) GTPPDPByMSAddress(link Link, addr net.IP) (*PDP, error) {
+ f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME)
+ if err != nil {
+ return nil, err
+ }
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_GTP_CMD_GETPDP,
+ Version: nl.GENL_GTP_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID), 0)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(0)))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index))))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_MS_ADDRESS, []byte(addr.To4())))
+ return gtpPDPGet(req)
+}
+
+func GTPPDPByMSAddress(link Link, addr net.IP) (*PDP, error) {
+ return pkgHandle.GTPPDPByMSAddress(link, addr)
+}
+
+func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error {
+ f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME)
+ if err != nil {
+ return err
+ }
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_GTP_CMD_NEWPDP,
+ Version: nl.GENL_GTP_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version)))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index))))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_PEER_ADDRESS, []byte(pdp.PeerAddress.To4())))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_MS_ADDRESS, []byte(pdp.MSAddress.To4())))
+
+ switch pdp.Version {
+ case 0:
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_TID, nl.Uint64Attr(pdp.TID)))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_FLOW, nl.Uint16Attr(pdp.Flow)))
+ case 1:
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_I_TEI, nl.Uint32Attr(pdp.ITEI)))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_O_TEI, nl.Uint32Attr(pdp.OTEI)))
+ default:
+ return fmt.Errorf("unsupported GTP version: %d", pdp.Version)
+ }
+ _, err = req.Execute(syscall.NETLINK_GENERIC, 0)
+ return err
+}
+
+func GTPPDPAdd(link Link, pdp *PDP) error {
+ return pkgHandle.GTPPDPAdd(link, pdp)
+}
+
+func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error {
+ f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME)
+ if err != nil {
+ return err
+ }
+ msg := &nl.Genlmsg{
+ Command: nl.GENL_GTP_CMD_DELPDP,
+ Version: nl.GENL_GTP_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version)))
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index))))
+
+ switch pdp.Version {
+ case 0:
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_TID, nl.Uint64Attr(pdp.TID)))
+ case 1:
+ req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_I_TEI, nl.Uint32Attr(pdp.ITEI)))
+ default:
+ return fmt.Errorf("unsupported GTP version: %d", pdp.Version)
+ }
+ _, err = req.Execute(syscall.NETLINK_GENERIC, 0)
+ return err
+}
+
+func GTPPDPDel(link Link, pdp *PDP) error {
+ return pkgHandle.GTPPDPDel(link, pdp)
+}
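[A minimal usage sketch of registering a GTPv1 tunnel context via GTPPDPAdd. The device name and addresses are assumptions; the addresses must be IPv4, since GTPPDPAdd serializes them with To4().]

    package main

    import (
        "log"
        "net"

        "github.com/vishvananda/netlink"
    )

    func main() {
        link, err := netlink.LinkByName("gtp0") // assumed existing GTP device
        if err != nil {
            log.Fatal(err)
        }
        pdp := &netlink.PDP{
            Version:     1,
            PeerAddress: net.ParseIP("203.0.113.1"),  // tunnel peer
            MSAddress:   net.ParseIP("198.51.100.7"), // mobile subscriber
            ITEI:        100, // incoming tunnel endpoint id
            OTEI:        200, // outgoing tunnel endpoint id
        }
        if err := netlink.GTPPDPAdd(link, pdp); err != nil {
            log.Fatal(err)
        }
    }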
diff --git a/vendor/github.com/vishvananda/netlink/handle_linux.go b/vendor/github.com/vishvananda/netlink/handle_linux.go
new file mode 100644
index 000000000..a04ceae6b
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/handle_linux.go
@@ -0,0 +1,111 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+// Empty handle used by the netlink package methods
+var pkgHandle = &Handle{}
+
+// Handle is a handle for netlink requests on a specific
+// network namespace. All requests on the same netlink
+// family share the same netlink socket, which is
+// released when the handle is deleted.
+type Handle struct {
+ sockets map[int]*nl.SocketHandle
+ lookupByDump bool
+}
+
+// SupportsNetlinkFamily reports whether the passed netlink family is supported by this Handle
+func (h *Handle) SupportsNetlinkFamily(nlFamily int) bool {
+ _, ok := h.sockets[nlFamily]
+ return ok
+}
+
+// NewHandle returns a netlink handle on the current network namespace.
+// The caller may specify the netlink families the handle should support.
+// If no families are specified, all families supported by the netlink
+// package are added automatically.
+func NewHandle(nlFamilies ...int) (*Handle, error) {
+ return newHandle(netns.None(), netns.None(), nlFamilies...)
+}
+
+// SetSocketTimeout sets the send and receive timeout for each socket in the
+// netlink handle. Although the socket timeout has a granularity of one
+// microsecond, the effective granularity is floored by the kernel timer tick,
+// whose default value is four milliseconds.
+func (h *Handle) SetSocketTimeout(to time.Duration) error {
+ if to < time.Microsecond {
+ return fmt.Errorf("invalid timeout, minimul value is %s", time.Microsecond)
+ }
+ tv := syscall.NsecToTimeval(to.Nanoseconds())
+ for _, sh := range h.sockets {
+ fd := sh.Socket.GetFd()
+ err := syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, &tv)
+ if err != nil {
+ return err
+ }
+ err = syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, &tv)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewHandleAt returns a netlink handle on the network namespace
+// specified by ns. If ns=netns.None(), the current network
+// namespace is assumed.
+func NewHandleAt(ns netns.NsHandle, nlFamilies ...int) (*Handle, error) {
+ return newHandle(ns, netns.None(), nlFamilies...)
+}
+
+// NewHandleAtFrom works like NewHandleAt but allows the caller to specify
+// both the new and the originating netns handles.
+func NewHandleAtFrom(newNs, curNs netns.NsHandle) (*Handle, error) {
+ return newHandle(newNs, curNs)
+}
+
+func newHandle(newNs, curNs netns.NsHandle, nlFamilies ...int) (*Handle, error) {
+ h := &Handle{sockets: map[int]*nl.SocketHandle{}}
+ fams := nl.SupportedNlFamilies
+ if len(nlFamilies) != 0 {
+ fams = nlFamilies
+ }
+ for _, f := range fams {
+ s, err := nl.GetNetlinkSocketAt(newNs, curNs, f)
+ if err != nil {
+ return nil, err
+ }
+ h.sockets[f] = &nl.SocketHandle{Socket: s}
+ }
+ return h, nil
+}
+
+// Delete releases the resources allocated to this handle
+func (h *Handle) Delete() {
+ for _, sh := range h.sockets {
+ sh.Close()
+ }
+ h.sockets = nil
+}
+
+func (h *Handle) newNetlinkRequest(proto, flags int) *nl.NetlinkRequest {
+ // Fall back to nl.NewNetlinkRequest so that the package-level API still uses the nl package variable nextSeqNr.
+ if h.sockets == nil {
+ return nl.NewNetlinkRequest(proto, flags)
+ }
+ return &nl.NetlinkRequest{
+ NlMsghdr: syscall.NlMsghdr{
+ Len: uint32(syscall.SizeofNlMsghdr),
+ Type: uint16(proto),
+ Flags: syscall.NLM_F_REQUEST | uint16(flags),
+ },
+ Sockets: h.sockets,
+ }
+}
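[A minimal usage sketch of the handle lifecycle defined above: scope a handle to NETLINK_ROUTE only, bound its socket timeouts, and release its sockets when done. LinkList is defined on Handle in link_linux.go.]

    package main

    import (
        "log"
        "syscall"
        "time"

        "github.com/vishvananda/netlink"
    )

    func main() {
        h, err := netlink.NewHandle(syscall.NETLINK_ROUTE)
        if err != nil {
            log.Fatal(err)
        }
        defer h.Delete() // closes the per-family sockets
        if err := h.SetSocketTimeout(500 * time.Millisecond); err != nil {
            log.Fatal(err)
        }
        links, err := h.LinkList() // reuses the handle's NETLINK_ROUTE socket
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("found %d links", len(links))
    }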
diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
new file mode 100644
index 000000000..32cf02273
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
@@ -0,0 +1,218 @@
+// +build !linux
+
+package netlink
+
+import (
+ "net"
+ "time"
+
+ "github.com/vishvananda/netns"
+)
+
+type Handle struct{}
+
+func NewHandle(nlFamilies ...int) (*Handle, error) {
+ return nil, ErrNotImplemented
+}
+
+func NewHandleAt(ns netns.NsHandle, nlFamilies ...int) (*Handle, error) {
+ return nil, ErrNotImplemented
+}
+
+func NewHandleAtFrom(newNs, curNs netns.NsHandle) (*Handle, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) Delete() {}
+
+func (h *Handle) SupportsNetlinkFamily(nlFamily int) bool {
+ return false
+}
+
+func (h *Handle) SetSocketTimeout(to time.Duration) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) SetPromiscOn(link Link) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) SetPromiscOff(link Link) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetUp(link Link) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetDown(link Link) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetMTU(link Link, mtu int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetName(link Link, name string) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetAlias(link Link, name string) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetMaster(link Link, master *Bridge) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetNoMaster(link Link) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetNsPid(link Link, nspid int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetNsFd(link Link, fd int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkAdd(link Link) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkDel(link Link) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkByName(name string) (Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) LinkByAlias(alias string) (Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) LinkByIndex(index int) (Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) LinkList() ([]Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) LinkSetHairpin(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetGuard(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetFastLeave(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetLearning(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetRootBlock(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetFlood(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) AddrAdd(link Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) AddrDel(link Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) ClassDel(class Class) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) ClassChange(class Class) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) ClassReplace(class Class) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) ClassAdd(class Class) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) FilterDel(filter Filter) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) FilterAdd(filter Filter) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) NeighAdd(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) NeighSet(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) NeighAppend(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) NeighDel(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) {
+ return nil, ErrNotImplemented
+}
+
+func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go
new file mode 100644
index 000000000..4e77037b5
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/link.go
@@ -0,0 +1,776 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Link represents a link device from netlink. Shared link attributes
+// like name may be retrieved using the Attrs() method. Unique data
+// can be retrieved by type-asserting the object to the concrete type.
+type Link interface {
+ Attrs() *LinkAttrs
+ Type() string
+}
+
+type (
+ NsPid int
+ NsFd int
+)
+
+// LinkAttrs represents data shared by most link types
+type LinkAttrs struct {
+ Index int
+ MTU int
+ TxQLen int // Transmit Queue Length
+ Name string
+ HardwareAddr net.HardwareAddr
+ Flags net.Flags
+ RawFlags uint32
+ ParentIndex int // index of the parent link device
+ MasterIndex int // must be the index of a bridge
+ Namespace interface{} // nil | NsPid | NsFd
+ Alias string
+ Statistics *LinkStatistics
+ Promisc int
+ Xdp *LinkXdp
+ EncapType string
+ Protinfo *Protinfo
+ OperState LinkOperState
+}
+
+// LinkOperState represents the values of the IFLA_OPERSTATE link
+// attribute, which contains the RFC2863 state of the interface.
+type LinkOperState uint8
+
+const (
+ OperUnknown = iota // Status can't be determined.
+ OperNotPresent // Some component is missing.
+ OperDown // Down.
+ OperLowerLayerDown // Down due to state of lower layer.
+ OperTesting // In some test mode.
+ OperDormant // Not up but pending an external event.
+ OperUp // Up, ready to send packets.
+)
+
+func (s LinkOperState) String() string {
+ switch s {
+ case OperNotPresent:
+ return "not-present"
+ case OperDown:
+ return "down"
+ case OperLowerLayerDown:
+ return "lower-layer-down"
+ case OperTesting:
+ return "testing"
+ case OperDormant:
+ return "dormant"
+ case OperUp:
+ return "up"
+ default:
+ return "unknown"
+ }
+}
+
+// NewLinkAttrs returns LinkAttrs structure filled with default values
+func NewLinkAttrs() LinkAttrs {
+ return LinkAttrs{
+ TxQLen: -1,
+ }
+}
+
+type LinkStatistics LinkStatistics64
+
+/*
+Ref: struct rtnl_link_stats {...}
+*/
+type LinkStatistics32 struct {
+ RxPackets uint32
+ TxPackets uint32
+ RxBytes uint32
+ TxBytes uint32
+ RxErrors uint32
+ TxErrors uint32
+ RxDropped uint32
+ TxDropped uint32
+ Multicast uint32
+ Collisions uint32
+ RxLengthErrors uint32
+ RxOverErrors uint32
+ RxCrcErrors uint32
+ RxFrameErrors uint32
+ RxFifoErrors uint32
+ RxMissedErrors uint32
+ TxAbortedErrors uint32
+ TxCarrierErrors uint32
+ TxFifoErrors uint32
+ TxHeartbeatErrors uint32
+ TxWindowErrors uint32
+ RxCompressed uint32
+ TxCompressed uint32
+}
+
+func (s32 LinkStatistics32) to64() *LinkStatistics64 {
+ return &LinkStatistics64{
+ RxPackets: uint64(s32.RxPackets),
+ TxPackets: uint64(s32.TxPackets),
+ RxBytes: uint64(s32.RxBytes),
+ TxBytes: uint64(s32.TxBytes),
+ RxErrors: uint64(s32.RxErrors),
+ TxErrors: uint64(s32.TxErrors),
+ RxDropped: uint64(s32.RxDropped),
+ TxDropped: uint64(s32.TxDropped),
+ Multicast: uint64(s32.Multicast),
+ Collisions: uint64(s32.Collisions),
+ RxLengthErrors: uint64(s32.RxLengthErrors),
+ RxOverErrors: uint64(s32.RxOverErrors),
+ RxCrcErrors: uint64(s32.RxCrcErrors),
+ RxFrameErrors: uint64(s32.RxFrameErrors),
+ RxFifoErrors: uint64(s32.RxFifoErrors),
+ RxMissedErrors: uint64(s32.RxMissedErrors),
+ TxAbortedErrors: uint64(s32.TxAbortedErrors),
+ TxCarrierErrors: uint64(s32.TxCarrierErrors),
+ TxFifoErrors: uint64(s32.TxFifoErrors),
+ TxHeartbeatErrors: uint64(s32.TxHeartbeatErrors),
+ TxWindowErrors: uint64(s32.TxWindowErrors),
+ RxCompressed: uint64(s32.RxCompressed),
+ TxCompressed: uint64(s32.TxCompressed),
+ }
+}
+
+/*
+Ref: struct rtnl_link_stats64 {...}
+*/
+type LinkStatistics64 struct {
+ RxPackets uint64
+ TxPackets uint64
+ RxBytes uint64
+ TxBytes uint64
+ RxErrors uint64
+ TxErrors uint64
+ RxDropped uint64
+ TxDropped uint64
+ Multicast uint64
+ Collisions uint64
+ RxLengthErrors uint64
+ RxOverErrors uint64
+ RxCrcErrors uint64
+ RxFrameErrors uint64
+ RxFifoErrors uint64
+ RxMissedErrors uint64
+ TxAbortedErrors uint64
+ TxCarrierErrors uint64
+ TxFifoErrors uint64
+ TxHeartbeatErrors uint64
+ TxWindowErrors uint64
+ RxCompressed uint64
+ TxCompressed uint64
+}
+
+type LinkXdp struct {
+ Fd int
+ Attached bool
+ Flags uint32
+ ProgId uint32
+}
+
+// Device links cannot be created via netlink. These are
+// links created by udev, such as 'lo' and 'eth0'.
+type Device struct {
+ LinkAttrs
+}
+
+func (device *Device) Attrs() *LinkAttrs {
+ return &device.LinkAttrs
+}
+
+func (device *Device) Type() string {
+ return "device"
+}
+
+// Dummy links are dummy ethernet devices
+type Dummy struct {
+ LinkAttrs
+}
+
+func (dummy *Dummy) Attrs() *LinkAttrs {
+ return &dummy.LinkAttrs
+}
+
+func (dummy *Dummy) Type() string {
+ return "dummy"
+}
+
+// Ifb links are advanced dummy devices for packet filtering
+type Ifb struct {
+ LinkAttrs
+}
+
+func (ifb *Ifb) Attrs() *LinkAttrs {
+ return &ifb.LinkAttrs
+}
+
+func (ifb *Ifb) Type() string {
+ return "ifb"
+}
+
+// Bridge links are simple linux bridges
+type Bridge struct {
+ LinkAttrs
+ MulticastSnooping *bool
+ HelloTime *uint32
+}
+
+func (bridge *Bridge) Attrs() *LinkAttrs {
+ return &bridge.LinkAttrs
+}
+
+func (bridge *Bridge) Type() string {
+ return "bridge"
+}
+
+// Vlan links have ParentIndex set in their Attrs()
+type Vlan struct {
+ LinkAttrs
+ VlanId int
+}
+
+func (vlan *Vlan) Attrs() *LinkAttrs {
+ return &vlan.LinkAttrs
+}
+
+func (vlan *Vlan) Type() string {
+ return "vlan"
+}
+
+type MacvlanMode uint16
+
+const (
+ MACVLAN_MODE_DEFAULT MacvlanMode = iota
+ MACVLAN_MODE_PRIVATE
+ MACVLAN_MODE_VEPA
+ MACVLAN_MODE_BRIDGE
+ MACVLAN_MODE_PASSTHRU
+ MACVLAN_MODE_SOURCE
+)
+
+// Macvlan links have ParentIndex set in their Attrs()
+type Macvlan struct {
+ LinkAttrs
+ Mode MacvlanMode
+}
+
+func (macvlan *Macvlan) Attrs() *LinkAttrs {
+ return &macvlan.LinkAttrs
+}
+
+func (macvlan *Macvlan) Type() string {
+ return "macvlan"
+}
+
+// Macvtap is a virtual interface based on macvlan
+type Macvtap struct {
+ Macvlan
+}
+
+func (macvtap Macvtap) Type() string {
+ return "macvtap"
+}
+
+type TuntapMode uint16
+type TuntapFlag uint16
+
+// Tuntap links are created via /dev/tun/tap, but can be destroyed via netlink
+type Tuntap struct {
+ LinkAttrs
+ Mode TuntapMode
+ Flags TuntapFlag
+}
+
+func (tuntap *Tuntap) Attrs() *LinkAttrs {
+ return &tuntap.LinkAttrs
+}
+
+func (tuntap *Tuntap) Type() string {
+ return "tuntap"
+}
+
+// Veth devices must specify PeerName on create
+type Veth struct {
+ LinkAttrs
+ PeerName string // veth on create only
+}
+
+func (veth *Veth) Attrs() *LinkAttrs {
+ return &veth.LinkAttrs
+}
+
+func (veth *Veth) Type() string {
+ return "veth"
+}
+
+// GenericLink links represent types that are not currently understood
+// by this netlink library.
+type GenericLink struct {
+ LinkAttrs
+ LinkType string
+}
+
+func (generic *GenericLink) Attrs() *LinkAttrs {
+ return &generic.LinkAttrs
+}
+
+func (generic *GenericLink) Type() string {
+ return generic.LinkType
+}
+
+type Vxlan struct {
+ LinkAttrs
+ VxlanId int
+ VtepDevIndex int
+ SrcAddr net.IP
+ Group net.IP
+ TTL int
+ TOS int
+ Learning bool
+ Proxy bool
+ RSC bool
+ L2miss bool
+ L3miss bool
+ UDPCSum bool
+ NoAge bool
+ GBP bool
+ FlowBased bool
+ Age int
+ Limit int
+ Port int
+ PortLow int
+ PortHigh int
+}
+
+func (vxlan *Vxlan) Attrs() *LinkAttrs {
+ return &vxlan.LinkAttrs
+}
+
+func (vxlan *Vxlan) Type() string {
+ return "vxlan"
+}
+
+type IPVlanMode uint16
+
+const (
+ IPVLAN_MODE_L2 IPVlanMode = iota
+ IPVLAN_MODE_L3
+ IPVLAN_MODE_L3S
+ IPVLAN_MODE_MAX
+)
+
+type IPVlan struct {
+ LinkAttrs
+ Mode IPVlanMode
+}
+
+func (ipvlan *IPVlan) Attrs() *LinkAttrs {
+ return &ipvlan.LinkAttrs
+}
+
+func (ipvlan *IPVlan) Type() string {
+ return "ipvlan"
+}
+
+// BondMode type
+type BondMode int
+
+func (b BondMode) String() string {
+ s, ok := bondModeToString[b]
+ if !ok {
+ return fmt.Sprintf("BondMode(%d)", b)
+ }
+ return s
+}
+
+// StringToBondMode returns the bond mode, or BOND_MODE_UNKNOWN if s is invalid.
+func StringToBondMode(s string) BondMode {
+ mode, ok := StringToBondModeMap[s]
+ if !ok {
+ return BOND_MODE_UNKNOWN
+ }
+ return mode
+}
+
+// Possible BondMode
+const (
+ BOND_MODE_BALANCE_RR BondMode = iota
+ BOND_MODE_ACTIVE_BACKUP
+ BOND_MODE_BALANCE_XOR
+ BOND_MODE_BROADCAST
+ BOND_MODE_802_3AD
+ BOND_MODE_BALANCE_TLB
+ BOND_MODE_BALANCE_ALB
+ BOND_MODE_UNKNOWN
+)
+
+var bondModeToString = map[BondMode]string{
+ BOND_MODE_BALANCE_RR: "balance-rr",
+ BOND_MODE_ACTIVE_BACKUP: "active-backup",
+ BOND_MODE_BALANCE_XOR: "balance-xor",
+ BOND_MODE_BROADCAST: "broadcast",
+ BOND_MODE_802_3AD: "802.3ad",
+ BOND_MODE_BALANCE_TLB: "balance-tlb",
+ BOND_MODE_BALANCE_ALB: "balance-alb",
+}
+var StringToBondModeMap = map[string]BondMode{
+ "balance-rr": BOND_MODE_BALANCE_RR,
+ "active-backup": BOND_MODE_ACTIVE_BACKUP,
+ "balance-xor": BOND_MODE_BALANCE_XOR,
+ "broadcast": BOND_MODE_BROADCAST,
+ "802.3ad": BOND_MODE_802_3AD,
+ "balance-tlb": BOND_MODE_BALANCE_TLB,
+ "balance-alb": BOND_MODE_BALANCE_ALB,
+}
+
+// BondArpValidate type
+type BondArpValidate int
+
+// Possible BondArpValidate value
+const (
+ BOND_ARP_VALIDATE_NONE BondArpValidate = iota
+ BOND_ARP_VALIDATE_ACTIVE
+ BOND_ARP_VALIDATE_BACKUP
+ BOND_ARP_VALIDATE_ALL
+)
+
+// BondPrimaryReselect type
+type BondPrimaryReselect int
+
+// Possible BondPrimaryReselect value
+const (
+ BOND_PRIMARY_RESELECT_ALWAYS BondPrimaryReselect = iota
+ BOND_PRIMARY_RESELECT_BETTER
+ BOND_PRIMARY_RESELECT_FAILURE
+)
+
+// BondArpAllTargets type
+type BondArpAllTargets int
+
+// Possible BondArpAllTargets value
+const (
+ BOND_ARP_ALL_TARGETS_ANY BondArpAllTargets = iota
+ BOND_ARP_ALL_TARGETS_ALL
+)
+
+// BondFailOverMac type
+type BondFailOverMac int
+
+// Possible BondFailOverMac value
+const (
+ BOND_FAIL_OVER_MAC_NONE BondFailOverMac = iota
+ BOND_FAIL_OVER_MAC_ACTIVE
+ BOND_FAIL_OVER_MAC_FOLLOW
+)
+
+// BondXmitHashPolicy type
+type BondXmitHashPolicy int
+
+func (b BondXmitHashPolicy) String() string {
+ s, ok := bondXmitHashPolicyToString[b]
+ if !ok {
+ return fmt.Sprintf("XmitHashPolicy(%d)", b)
+ }
+ return s
+}
+
+// StringToBondXmitHashPolicy returns the bond transmit hash policy, or BOND_XMIT_HASH_POLICY_UNKNOWN if s is invalid.
+func StringToBondXmitHashPolicy(s string) BondXmitHashPolicy {
+ lacp, ok := StringToBondXmitHashPolicyMap[s]
+ if !ok {
+ return BOND_XMIT_HASH_POLICY_UNKNOWN
+ }
+ return lacp
+}
+
+// Possible BondXmitHashPolicy value
+const (
+ BOND_XMIT_HASH_POLICY_LAYER2 BondXmitHashPolicy = iota
+ BOND_XMIT_HASH_POLICY_LAYER3_4
+ BOND_XMIT_HASH_POLICY_LAYER2_3
+ BOND_XMIT_HASH_POLICY_ENCAP2_3
+ BOND_XMIT_HASH_POLICY_ENCAP3_4
+ BOND_XMIT_HASH_POLICY_UNKNOWN
+)
+
+var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{
+ BOND_XMIT_HASH_POLICY_LAYER2: "layer2",
+ BOND_XMIT_HASH_POLICY_LAYER3_4: "layer3+4",
+ BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3",
+ BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3",
+ BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4",
+}
+var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{
+ "layer2": BOND_XMIT_HASH_POLICY_LAYER2,
+ "layer3+4": BOND_XMIT_HASH_POLICY_LAYER3_4,
+ "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3,
+ "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3,
+ "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4,
+}
+
+// BondLacpRate type
+type BondLacpRate int
+
+func (b BondLacpRate) String() string {
+ s, ok := bondLacpRateToString[b]
+ if !ok {
+ return fmt.Sprintf("LacpRate(%d)", b)
+ }
+ return s
+}
+
+// StringToBondLacpRate returns the bond LACP rate, or BOND_LACP_RATE_UNKNOWN if s is invalid.
+func StringToBondLacpRate(s string) BondLacpRate {
+ lacp, ok := StringToBondLacpRateMap[s]
+ if !ok {
+ return BOND_LACP_RATE_UNKNOWN
+ }
+ return lacp
+}
+
+// Possible BondLacpRate value
+const (
+ BOND_LACP_RATE_SLOW BondLacpRate = iota
+ BOND_LACP_RATE_FAST
+ BOND_LACP_RATE_UNKNOWN
+)
+
+var bondLacpRateToString = map[BondLacpRate]string{
+ BOND_LACP_RATE_SLOW: "slow",
+ BOND_LACP_RATE_FAST: "fast",
+}
+var StringToBondLacpRateMap = map[string]BondLacpRate{
+ "slow": BOND_LACP_RATE_SLOW,
+ "fast": BOND_LACP_RATE_FAST,
+}
+
+// BondAdSelect type
+type BondAdSelect int
+
+// Possible BondAdSelect value
+const (
+ BOND_AD_SELECT_STABLE BondAdSelect = iota
+ BOND_AD_SELECT_BANDWIDTH
+ BOND_AD_SELECT_COUNT
+)
+
+// BondAdInfo represents ad info for bond
+type BondAdInfo struct {
+ AggregatorId int
+ NumPorts int
+ ActorKey int
+ PartnerKey int
+ PartnerMac net.HardwareAddr
+}
+
+// Bond representation
+type Bond struct {
+ LinkAttrs
+ Mode BondMode
+ ActiveSlave int
+ Miimon int
+ UpDelay int
+ DownDelay int
+ UseCarrier int
+ ArpInterval int
+ ArpIpTargets []net.IP
+ ArpValidate BondArpValidate
+ ArpAllTargets BondArpAllTargets
+ Primary int
+ PrimaryReselect BondPrimaryReselect
+ FailOverMac BondFailOverMac
+ XmitHashPolicy BondXmitHashPolicy
+ ResendIgmp int
+ NumPeerNotif int
+ AllSlavesActive int
+ MinLinks int
+ LpInterval int
+ PackersPerSlave int
+ LacpRate BondLacpRate
+ AdSelect BondAdSelect
+ // Judging by the iproute2 tool, AdInfo can only be retrieved; it cannot be set.
+ AdInfo *BondAdInfo
+ AdActorSysPrio int
+ AdUserPortKey int
+ AdActorSystem net.HardwareAddr
+ TlbDynamicLb int
+}
+
+func NewLinkBond(atr LinkAttrs) *Bond {
+ return &Bond{
+ LinkAttrs: atr,
+ Mode: -1,
+ ActiveSlave: -1,
+ Miimon: -1,
+ UpDelay: -1,
+ DownDelay: -1,
+ UseCarrier: -1,
+ ArpInterval: -1,
+ ArpIpTargets: nil,
+ ArpValidate: -1,
+ ArpAllTargets: -1,
+ Primary: -1,
+ PrimaryReselect: -1,
+ FailOverMac: -1,
+ XmitHashPolicy: -1,
+ ResendIgmp: -1,
+ NumPeerNotif: -1,
+ AllSlavesActive: -1,
+ MinLinks: -1,
+ LpInterval: -1,
+ PackersPerSlave: -1,
+ LacpRate: -1,
+ AdSelect: -1,
+ AdActorSysPrio: -1,
+ AdUserPortKey: -1,
+ AdActorSystem: nil,
+ TlbDynamicLb: -1,
+ }
+}
+
+// Flag mask for bond options. The corresponding flag in Bond.Flagmask must be set for an option to take effect.
+const (
+ BOND_MODE_MASK uint64 = 1 << (1 + iota)
+ BOND_ACTIVE_SLAVE_MASK
+ BOND_MIIMON_MASK
+ BOND_UPDELAY_MASK
+ BOND_DOWNDELAY_MASK
+ BOND_USE_CARRIER_MASK
+ BOND_ARP_INTERVAL_MASK
+ BOND_ARP_VALIDATE_MASK
+ BOND_ARP_ALL_TARGETS_MASK
+ BOND_PRIMARY_MASK
+ BOND_PRIMARY_RESELECT_MASK
+ BOND_FAIL_OVER_MAC_MASK
+ BOND_XMIT_HASH_POLICY_MASK
+ BOND_RESEND_IGMP_MASK
+ BOND_NUM_PEER_NOTIF_MASK
+ BOND_ALL_SLAVES_ACTIVE_MASK
+ BOND_MIN_LINKS_MASK
+ BOND_LP_INTERVAL_MASK
+ BOND_PACKETS_PER_SLAVE_MASK
+ BOND_LACP_RATE_MASK
+ BOND_AD_SELECT_MASK
+)
+
+// Attrs implementation.
+func (bond *Bond) Attrs() *LinkAttrs {
+ return &bond.LinkAttrs
+}
+
+// Type implementation for Bond.
+func (bond *Bond) Type() string {
+ return "bond"
+}
+
+// Gretap devices must specify LocalIP and RemoteIP on create
+type Gretap struct {
+ LinkAttrs
+ IKey uint32
+ OKey uint32
+ EncapSport uint16
+ EncapDport uint16
+ Local net.IP
+ Remote net.IP
+ IFlags uint16
+ OFlags uint16
+ PMtuDisc uint8
+ Ttl uint8
+ Tos uint8
+ EncapType uint16
+ EncapFlags uint16
+ Link uint32
+ FlowBased bool
+}
+
+func (gretap *Gretap) Attrs() *LinkAttrs {
+ return &gretap.LinkAttrs
+}
+
+func (gretap *Gretap) Type() string {
+ return "gretap"
+}
+
+type Iptun struct {
+ LinkAttrs
+ Ttl uint8
+ Tos uint8
+ PMtuDisc uint8
+ Link uint32
+ Local net.IP
+ Remote net.IP
+}
+
+func (iptun *Iptun) Attrs() *LinkAttrs {
+ return &iptun.LinkAttrs
+}
+
+func (iptun *Iptun) Type() string {
+ return "ipip"
+}
+
+type Vti struct {
+ LinkAttrs
+ IKey uint32
+ OKey uint32
+ Link uint32
+ Local net.IP
+ Remote net.IP
+}
+
+func (vti *Vti) Attrs() *LinkAttrs {
+ return &vti.LinkAttrs
+}
+
+func (vti *Vti) Type() string {
+ return "vti"
+}
+
+type Vrf struct {
+ LinkAttrs
+ Table uint32
+}
+
+func (vrf *Vrf) Attrs() *LinkAttrs {
+ return &vrf.LinkAttrs
+}
+
+func (vrf *Vrf) Type() string {
+ return "vrf"
+}
+
+type GTP struct {
+ LinkAttrs
+ FD0 int
+ FD1 int
+ Role int
+ PDPHashsize int
+}
+
+func (gtp *GTP) Attrs() *LinkAttrs {
+ return &gtp.LinkAttrs
+}
+
+func (gtp *GTP) Type() string {
+ return "gtp"
+}
+
+// iproute2 supported devices:
+// vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
+// bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan |
+// gre | gretap | ip6gre | ip6gretap | vti | nlmon |
+// bond_slave | ipvlan
+
+// LinkNotFoundError wraps the various not-found errors when
+// getting/reading links. This is intended for better error
+// handling by dependent code, so that a "not found" error
+// can be distinguished from other errors.
+type LinkNotFoundError struct {
+ error
+}
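[A minimal usage sketch of composing these link types and handing them to LinkAdd (defined in link_linux.go below). Names are illustrative; after LinkAdd the bridge's Index is populated via ensureIndex, so it can be reused as MasterIndex.]

    package main

    import (
        "log"

        "github.com/vishvananda/netlink"
    )

    func main() {
        battrs := netlink.NewLinkAttrs() // defaults, e.g. TxQLen: -1
        battrs.Name = "br0"
        br := &netlink.Bridge{LinkAttrs: battrs}
        if err := netlink.LinkAdd(br); err != nil {
            log.Fatal(err)
        }

        vattrs := netlink.NewLinkAttrs()
        vattrs.Name = "veth0"
        vattrs.MasterIndex = br.Attrs().Index // enslave to br0 at creation
        veth := &netlink.Veth{
            LinkAttrs: vattrs,
            PeerName:  "veth0p", // required when creating a veth pair
        }
        if err := netlink.LinkAdd(veth); err != nil {
            log.Fatal(err)
        }
    }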
diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go
new file mode 100644
index 000000000..18a4d1a5f
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/link_linux.go
@@ -0,0 +1,1819 @@
+package netlink
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "os"
+ "syscall"
+ "unsafe"
+
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+const (
+ SizeofLinkStats32 = 0x5c
+ SizeofLinkStats64 = 0xd8
+ IFLA_STATS64 = 0x17 // syscall pkg does not contain this one
+)
+
+const (
+ TUNTAP_MODE_TUN TuntapMode = syscall.IFF_TUN
+ TUNTAP_MODE_TAP TuntapMode = syscall.IFF_TAP
+ TUNTAP_DEFAULTS TuntapFlag = syscall.IFF_TUN_EXCL | syscall.IFF_ONE_QUEUE
+ TUNTAP_VNET_HDR TuntapFlag = syscall.IFF_VNET_HDR
+ TUNTAP_TUN_EXCL TuntapFlag = syscall.IFF_TUN_EXCL
+ TUNTAP_NO_PI TuntapFlag = syscall.IFF_NO_PI
+ TUNTAP_ONE_QUEUE TuntapFlag = syscall.IFF_ONE_QUEUE
+)
+
+var lookupByDump = false
+
+var macvlanModes = [...]uint32{
+ 0,
+ nl.MACVLAN_MODE_PRIVATE,
+ nl.MACVLAN_MODE_VEPA,
+ nl.MACVLAN_MODE_BRIDGE,
+ nl.MACVLAN_MODE_PASSTHRU,
+ nl.MACVLAN_MODE_SOURCE,
+}
+
+func ensureIndex(link *LinkAttrs) {
+ if link != nil && link.Index == 0 {
+ newlink, _ := LinkByName(link.Name)
+ if newlink != nil {
+ link.Index = newlink.Attrs().Index
+ }
+ }
+}
+
+func (h *Handle) ensureIndex(link *LinkAttrs) {
+ if link != nil && link.Index == 0 {
+ newlink, _ := h.LinkByName(link.Name)
+ if newlink != nil {
+ link.Index = newlink.Attrs().Index
+ }
+ }
+}
+
+func (h *Handle) LinkSetARPOff(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change |= syscall.IFF_NOARP
+ msg.Flags |= syscall.IFF_NOARP
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func LinkSetARPOff(link Link) error {
+ return pkgHandle.LinkSetARPOff(link)
+}
+
+func (h *Handle) LinkSetARPOn(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change |= syscall.IFF_NOARP
+ msg.Flags &= ^uint32(syscall.IFF_NOARP)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func LinkSetARPOn(link Link) error {
+ return pkgHandle.LinkSetARPOn(link)
+}
+
+func (h *Handle) SetPromiscOn(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change = syscall.IFF_PROMISC
+ msg.Flags = syscall.IFF_PROMISC
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func BridgeSetMcastSnoop(link Link, on bool) error {
+ return pkgHandle.BridgeSetMcastSnoop(link, on)
+}
+
+func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error {
+ bridge := link.(*Bridge)
+ bridge.MulticastSnooping = &on
+ return h.linkModify(bridge, syscall.NLM_F_ACK)
+}
+
+func SetPromiscOn(link Link) error {
+ return pkgHandle.SetPromiscOn(link)
+}
+
+func (h *Handle) SetPromiscOff(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change = syscall.IFF_PROMISC
+ msg.Flags = 0 & ^syscall.IFF_PROMISC
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func SetPromiscOff(link Link) error {
+ return pkgHandle.SetPromiscOff(link)
+}
+
+// LinkSetUp enables the link device.
+// Equivalent to: `ip link set $link up`
+func LinkSetUp(link Link) error {
+ return pkgHandle.LinkSetUp(link)
+}
+
+// LinkSetUp enables the link device.
+// Equivalent to: `ip link set $link up`
+func (h *Handle) LinkSetUp(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change = syscall.IFF_UP
+ msg.Flags = syscall.IFF_UP
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetDown disables link device.
+// Equivalent to: `ip link set $link down`
+func LinkSetDown(link Link) error {
+ return pkgHandle.LinkSetDown(link)
+}
+
+// LinkSetDown disables link device.
+// Equivalent to: `ip link set $link down`
+func (h *Handle) LinkSetDown(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change = syscall.IFF_UP
+ msg.Flags = 0 & ^syscall.IFF_UP
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetMTU sets the mtu of the link device.
+// Equivalent to: `ip link set $link mtu $mtu`
+func LinkSetMTU(link Link, mtu int) error {
+ return pkgHandle.LinkSetMTU(link, mtu)
+}
+
+// LinkSetMTU sets the mtu of the link device.
+// Equivalent to: `ip link set $link mtu $mtu`
+func (h *Handle) LinkSetMTU(link Link, mtu int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(mtu))
+
+ data := nl.NewRtAttr(syscall.IFLA_MTU, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetName sets the name of the link device.
+// Equivalent to: `ip link set $link name $name`
+func LinkSetName(link Link, name string) error {
+ return pkgHandle.LinkSetName(link, name)
+}
+
+// LinkSetName sets the name of the link device.
+// Equivalent to: `ip link set $link name $name`
+func (h *Handle) LinkSetName(link Link, name string) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name))
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetAlias sets the alias of the link device.
+// Equivalent to: `ip link set dev $link alias $name`
+func LinkSetAlias(link Link, name string) error {
+ return pkgHandle.LinkSetAlias(link, name)
+}
+
+// LinkSetAlias sets the alias of the link device.
+// Equivalent to: `ip link set dev $link alias $name`
+func (h *Handle) LinkSetAlias(link Link, name string) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(syscall.IFLA_IFALIAS, []byte(name))
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetHardwareAddr sets the hardware address of the link device.
+// Equivalent to: `ip link set $link address $hwaddr`
+func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+ return pkgHandle.LinkSetHardwareAddr(link, hwaddr)
+}
+
+// LinkSetHardwareAddr sets the hardware address of the link device.
+// Equivalent to: `ip link set $link address $hwaddr`
+func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr))
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
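+
+// Editor's sketch, not part of the vendored source: pinning a MAC address on
+// an assumed device "eth0"; net.ParseMAC builds the net.HardwareAddr.
+func exampleSetMAC() error {
+ hwaddr, err := net.ParseMAC("02:00:00:00:00:01")
+ if err != nil {
+ return err
+ }
+ link, err := LinkByName("eth0")
+ if err != nil {
+ return err
+ }
+ return LinkSetHardwareAddr(link, hwaddr)
+}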
+
+// LinkSetVfHardwareAddr sets the hardware address of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf mac $hwaddr`
+func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error {
+ return pkgHandle.LinkSetVfHardwareAddr(link, vf, hwaddr)
+}
+
+// LinkSetVfHardwareAddr sets the hardware address of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf mac $hwaddr`
+func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil)
+ info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ vfmsg := nl.VfMac{
+ Vf: uint32(vf),
+ }
+ copy(vfmsg.Mac[:], []byte(hwaddr))
+ nl.NewRtAttrChild(info, nl.IFLA_VF_MAC, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetVfVlan sets the vlan of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf vlan $vlan`
+func LinkSetVfVlan(link Link, vf, vlan int) error {
+ return pkgHandle.LinkSetVfVlan(link, vf, vlan)
+}
+
+// LinkSetVfVlan sets the vlan of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf vlan $vlan`
+func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil)
+ info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ vfmsg := nl.VfVlan{
+ Vf: uint32(vf),
+ Vlan: uint32(vlan),
+ }
+ nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetVfTxRate sets the tx rate of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf rate $rate`
+func LinkSetVfTxRate(link Link, vf, rate int) error {
+ return pkgHandle.LinkSetVfTxRate(link, vf, rate)
+}
+
+// LinkSetVfTxRate sets the tx rate of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf rate $rate`
+func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil)
+ info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ vfmsg := nl.VfTxRate{
+ Vf: uint32(vf),
+ Rate: uint32(rate),
+ }
+ nl.NewRtAttrChild(info, nl.IFLA_VF_TX_RATE, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetMaster sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMaster(link Link, master *Bridge) error {
+ return pkgHandle.LinkSetMaster(link, master)
+}
+
+// LinkSetMaster sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func (h *Handle) LinkSetMaster(link Link, master *Bridge) error {
+ index := 0
+ if master != nil {
+ masterBase := master.Attrs()
+ h.ensureIndex(masterBase)
+ index = masterBase.Index
+ }
+ if index <= 0 {
+ return fmt.Errorf("Device does not exist")
+ }
+ return h.LinkSetMasterByIndex(link, index)
+}
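+
+// Editor's sketch, not part of the vendored source: enslaving an assumed
+// device "eth0" to an assumed bridge "br0"; the type assertion guards
+// against br0 not actually being a bridge.
+func exampleEnslaveToBridge() error {
+ br, err := LinkByName("br0")
+ if err != nil {
+ return err
+ }
+ bridge, ok := br.(*Bridge)
+ if !ok {
+ return fmt.Errorf("br0 is not a bridge")
+ }
+ eth, err := LinkByName("eth0")
+ if err != nil {
+ return err
+ }
+ return LinkSetMaster(eth, bridge)
+}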
+
+// LinkSetNoMaster removes the master of the link device.
+// Equivalent to: `ip link set $link nomaster`
+func LinkSetNoMaster(link Link) error {
+ return pkgHandle.LinkSetNoMaster(link)
+}
+
+// LinkSetNoMaster removes the master of the link device.
+// Equivalent to: `ip link set $link nomaster`
+func (h *Handle) LinkSetNoMaster(link Link) error {
+ return h.LinkSetMasterByIndex(link, 0)
+}
+
+// LinkSetMasterByIndex sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMasterByIndex(link Link, masterIndex int) error {
+ return pkgHandle.LinkSetMasterByIndex(link, masterIndex)
+}
+
+// LinkSetMasterByIndex sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(masterIndex))
+
+ data := nl.NewRtAttr(syscall.IFLA_MASTER, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetNsPid puts the device into a new network namespace. The
+// pid must belong to a running process.
+// Equivalent to: `ip link set $link netns $pid`
+func LinkSetNsPid(link Link, nspid int) error {
+ return pkgHandle.LinkSetNsPid(link, nspid)
+}
+
+// LinkSetNsPid puts the device into a new network namespace. The
+// pid must belong to a running process.
+// Equivalent to: `ip link set $link netns $pid`
+func (h *Handle) LinkSetNsPid(link Link, nspid int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(nspid))
+
+ data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetNsFd puts the device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `ip link set $link netns $ns`
+func LinkSetNsFd(link Link, fd int) error {
+ return pkgHandle.LinkSetNsFd(link, fd)
+}
+
+// LinkSetNsFd puts the device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `ip link set $link netns $ns`
+func (h *Handle) LinkSetNsFd(link Link, fd int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(fd))
+
+ data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
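+
+// Editor's sketch, not part of the vendored source: moving an assumed device
+// "veth0" into the network namespace of a running process identified by pid.
+func exampleMoveToNamespace(pid int) error {
+ link, err := LinkByName("veth0")
+ if err != nil {
+ return err
+ }
+ return LinkSetNsPid(link, pid)
+}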
+
+// LinkSetXdpFd attaches a bpf program to the link's driver. The fd must
+// refer to a program loaded with bpf(type=BPF_PROG_TYPE_XDP).
+func LinkSetXdpFd(link Link, fd int) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ addXdpAttrs(&LinkXdp{Fd: fd}, req)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func boolAttr(val bool) []byte {
+ var v uint8
+ if val {
+ v = 1
+ }
+ return nl.Uint8Attr(v)
+}
+
+type vxlanPortRange struct {
+ Lo, Hi uint16
+}
+
+func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+ if vxlan.FlowBased {
+ vxlan.VxlanId = 0
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
+
+ if vxlan.VtepDevIndex != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
+ }
+ if vxlan.SrcAddr != nil {
+ ip := vxlan.SrcAddr.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL, []byte(ip))
+ } else {
+ ip = vxlan.SrcAddr.To16()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL6, []byte(ip))
+ }
+ }
+ }
+ if vxlan.Group != nil {
+ group := vxlan.Group.To4()
+ if group != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP, []byte(group))
+ } else {
+ group = vxlan.Group.To16()
+ if group != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP6, []byte(group))
+ }
+ }
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL)))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS)))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
+
+ if vxlan.UDPCSum {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum))
+ }
+ if vxlan.GBP {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, []byte{})
+ }
+ if vxlan.FlowBased {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_FLOWBASED, boolAttr(vxlan.FlowBased))
+ }
+ if vxlan.NoAge {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
+ } else if vxlan.Age > 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age)))
+ }
+ if vxlan.Limit > 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
+ }
+ if vxlan.Port > 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port)))
+ }
+ if vxlan.PortLow > 0 || vxlan.PortHigh > 0 {
+ pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)}
+
+ buf := new(bytes.Buffer)
+ binary.Write(buf, binary.BigEndian, &pr)
+
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes())
+ }
+}
+
+func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ if bond.Mode >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode)))
+ }
+ if bond.ActiveSlave >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave)))
+ }
+ if bond.Miimon >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_MIIMON, nl.Uint32Attr(uint32(bond.Miimon)))
+ }
+ if bond.UpDelay >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay)))
+ }
+ if bond.DownDelay >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay)))
+ }
+ if bond.UseCarrier >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier)))
+ }
+ if bond.ArpInterval >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval)))
+ }
+ if bond.ArpIpTargets != nil {
+ msg := nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_IP_TARGET, nil)
+ for i := range bond.ArpIpTargets {
+ ip := bond.ArpIpTargets[i].To4()
+ if ip != nil {
+ nl.NewRtAttrChild(msg, i, []byte(ip))
+ continue
+ }
+ ip = bond.ArpIpTargets[i].To16()
+ if ip != nil {
+ nl.NewRtAttrChild(msg, i, []byte(ip))
+ }
+ }
+ }
+ if bond.ArpValidate >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate)))
+ }
+ if bond.ArpAllTargets >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets)))
+ }
+ if bond.Primary >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary)))
+ }
+ if bond.PrimaryReselect >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect)))
+ }
+ if bond.FailOverMac >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac)))
+ }
+ if bond.XmitHashPolicy >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy)))
+ }
+ if bond.ResendIgmp >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp)))
+ }
+ if bond.NumPeerNotif >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif)))
+ }
+ if bond.AllSlavesActive >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_ALL_SLAVES_ACTIVE, nl.Uint8Attr(uint8(bond.AllSlavesActive)))
+ }
+ if bond.MinLinks >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks)))
+ }
+ if bond.LpInterval >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval)))
+ }
+ if bond.PackersPerSlave >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave)))
+ }
+ if bond.LacpRate >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate)))
+ }
+ if bond.AdSelect >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect)))
+ }
+ if bond.AdActorSysPrio >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYS_PRIO, nl.Uint16Attr(uint16(bond.AdActorSysPrio)))
+ }
+ if bond.AdUserPortKey >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_USER_PORT_KEY, nl.Uint16Attr(uint16(bond.AdUserPortKey)))
+ }
+ if bond.AdActorSystem != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYSTEM, []byte(bond.AdActorSystem))
+ }
+ if bond.TlbDynamicLb >= 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_BOND_TLB_DYNAMIC_LB, nl.Uint8Attr(uint8(bond.TlbDynamicLb)))
+ }
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken from the parameters in the link object.
+// Equivalent to: `ip link add $link`
+func LinkAdd(link Link) error {
+ return pkgHandle.LinkAdd(link)
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken from the parameters in the link object.
+// Equivalent to: `ip link add $link`
+func (h *Handle) LinkAdd(link Link) error {
+ return h.linkModify(link, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+}
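+
+// Editor's sketch, not part of the vendored source: creating a veth pair via
+// LinkAdd. The names are illustrative; TxQLen is set to -1 so linkModify's
+// TxQLen >= 0 check leaves the kernel default queue length untouched.
+func exampleAddVethPair() error {
+ veth := &Veth{
+ LinkAttrs: LinkAttrs{Name: "veth0", TxQLen: -1},
+ PeerName: "veth1",
+ }
+ if err := LinkAdd(veth); err != nil {
+ return err
+ }
+ return LinkSetUp(veth)
+}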
+
+func (h *Handle) linkModify(link Link, flags int) error {
+ // TODO: support extra data for macvlan
+ base := link.Attrs()
+
+ if base.Name == "" {
+ return fmt.Errorf("LinkAttrs.Name cannot be empty!")
+ }
+
+ if tuntap, ok := link.(*Tuntap); ok {
+ // TODO: support user
+ // TODO: support group
+ // TODO: multi_queue
+ // TODO: support non-persistent
+ if tuntap.Mode < syscall.IFF_TUN || tuntap.Mode > syscall.IFF_TAP {
+ return fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode)
+ }
+ file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ var req ifReq
+ if tuntap.Flags == 0 {
+ req.Flags = uint16(TUNTAP_DEFAULTS)
+ } else {
+ req.Flags = uint16(tuntap.Flags)
+ }
+ req.Flags |= uint16(tuntap.Mode)
+ copy(req.Name[:15], base.Name)
+ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETIFF), uintptr(unsafe.Pointer(&req)))
+ if errno != 0 {
+ return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed, errno %v", errno)
+ }
+ _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETPERSIST), 1)
+ if errno != 0 {
+ return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno)
+ }
+ h.ensureIndex(base)
+
+ // can't set master during create, so set it afterwards
+ if base.MasterIndex != 0 {
+ // TODO: verify MasterIndex is actually a bridge?
+ return h.LinkSetMasterByIndex(link, base.MasterIndex)
+ }
+ return nil
+ }
+
+ req := h.newNetlinkRequest(syscall.RTM_NEWLINK, flags)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ // TODO: make it shorter
+ if base.Flags&net.FlagUp != 0 {
+ msg.Change = syscall.IFF_UP
+ msg.Flags = syscall.IFF_UP
+ }
+ if base.Flags&net.FlagBroadcast != 0 {
+ msg.Change |= syscall.IFF_BROADCAST
+ msg.Flags |= syscall.IFF_BROADCAST
+ }
+ if base.Flags&net.FlagLoopback != 0 {
+ msg.Change |= syscall.IFF_LOOPBACK
+ msg.Flags |= syscall.IFF_LOOPBACK
+ }
+ if base.Flags&net.FlagPointToPoint != 0 {
+ msg.Change |= syscall.IFF_POINTOPOINT
+ msg.Flags |= syscall.IFF_POINTOPOINT
+ }
+ if base.Flags&net.FlagMulticast != 0 {
+ msg.Change |= syscall.IFF_MULTICAST
+ msg.Flags |= syscall.IFF_MULTICAST
+ }
+ req.AddData(msg)
+
+ if base.ParentIndex != 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(base.ParentIndex))
+ data := nl.NewRtAttr(syscall.IFLA_LINK, b)
+ req.AddData(data)
+ } else if link.Type() == "ipvlan" {
+ return fmt.Errorf("Can't create ipvlan link without ParentIndex")
+ }
+
+ nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name))
+ req.AddData(nameData)
+
+ if base.MTU > 0 {
+ mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+ req.AddData(mtu)
+ }
+
+ if base.TxQLen >= 0 {
+ qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
+ req.AddData(qlen)
+ }
+
+ if base.HardwareAddr != nil {
+ hwaddr := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(base.HardwareAddr))
+ req.AddData(hwaddr)
+ }
+
+ if base.Namespace != nil {
+ var attr *nl.RtAttr
+ switch base.Namespace.(type) {
+ case NsPid:
+ val := nl.Uint32Attr(uint32(base.Namespace.(NsPid)))
+ attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val)
+ case NsFd:
+ val := nl.Uint32Attr(uint32(base.Namespace.(NsFd)))
+ attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val)
+ }
+
+ req.AddData(attr)
+ }
+
+ if base.Xdp != nil {
+ addXdpAttrs(base.Xdp, req)
+ }
+
+ linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
+ nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
+
+ if vlan, ok := link.(*Vlan); ok {
+ b := make([]byte, 2)
+ native.PutUint16(b, uint16(vlan.VlanId))
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
+ } else if veth, ok := link.(*Veth); ok {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
+ nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC)
+ nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName))
+ if base.TxQLen >= 0 {
+ nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
+ }
+ if base.MTU > 0 {
+ nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+ }
+
+ } else if vxlan, ok := link.(*Vxlan); ok {
+ addVxlanAttrs(vxlan, linkInfo)
+ } else if bond, ok := link.(*Bond); ok {
+ addBondAttrs(bond, linkInfo)
+ } else if ipv, ok := link.(*IPVlan); ok {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode)))
+ } else if macv, ok := link.(*Macvlan); ok {
+ if macv.Mode != MACVLAN_MODE_DEFAULT {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
+ }
+ } else if macv, ok := link.(*Macvtap); ok {
+ if macv.Mode != MACVLAN_MODE_DEFAULT {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
+ }
+ } else if gretap, ok := link.(*Gretap); ok {
+ addGretapAttrs(gretap, linkInfo)
+ } else if iptun, ok := link.(*Iptun); ok {
+ addIptunAttrs(iptun, linkInfo)
+ } else if vti, ok := link.(*Vti); ok {
+ addVtiAttrs(vti, linkInfo)
+ } else if vrf, ok := link.(*Vrf); ok {
+ addVrfAttrs(vrf, linkInfo)
+ } else if bridge, ok := link.(*Bridge); ok {
+ addBridgeAttrs(bridge, linkInfo)
+ } else if gtp, ok := link.(*GTP); ok {
+ addGTPAttrs(gtp, linkInfo)
+ }
+
+ req.AddData(linkInfo)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return err
+ }
+
+ h.ensureIndex(base)
+
+ // can't set master during create, so set it afterwards
+ if base.MasterIndex != 0 {
+ // TODO: verify MasterIndex is actually a bridge?
+ return h.LinkSetMasterByIndex(link, base.MasterIndex)
+ }
+ return nil
+}
+
+// LinkDel deletes link device. Either Index or Name must be set in
+// the link object for it to be deleted. The other values are ignored.
+// Equivalent to: `ip link del $link`
+func LinkDel(link Link) error {
+ return pkgHandle.LinkDel(link)
+}
+
+// LinkDel deletes link device. Either Index or Name must be set in
+// the link object for it to be deleted. The other values are ignored.
+// Equivalent to: `ip link del $link`
+func (h *Handle) LinkDel(link Link) error {
+ base := link.Attrs()
+
+ h.ensureIndex(base)
+
+ req := h.newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func (h *Handle) linkByNameDump(name string) (Link, error) {
+ links, err := h.LinkList()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, link := range links {
+ if link.Attrs().Name == name {
+ return link, nil
+ }
+ }
+ return nil, LinkNotFoundError{fmt.Errorf("Link %s not found", name)}
+}
+
+func (h *Handle) linkByAliasDump(alias string) (Link, error) {
+ links, err := h.LinkList()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, link := range links {
+ if link.Attrs().Alias == alias {
+ return link, nil
+ }
+ }
+ return nil, LinkNotFoundError{fmt.Errorf("Link alias %s not found", alias)}
+}
+
+// LinkByName finds a link by name and returns a pointer to the object.
+func LinkByName(name string) (Link, error) {
+ return pkgHandle.LinkByName(name)
+}
+
+// LinkByName finds a link by name and returns a pointer to the object.
+func (h *Handle) LinkByName(name string) (Link, error) {
+ if h.lookupByDump {
+ return h.linkByNameDump(name)
+ }
+
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ req.AddData(msg)
+
+ nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name))
+ req.AddData(nameData)
+
+ link, err := execGetLink(req)
+ if err == syscall.EINVAL {
+ // older kernels don't support looking up via IFLA_IFNAME
+ // so fall back to dumping all links
+ h.lookupByDump = true
+ return h.linkByNameDump(name)
+ }
+
+ return link, err
+}
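+
+// Editor's sketch, not part of the vendored source: separating "no such
+// device" from other failures when resolving a link by name.
+func exampleLookup(name string) (Link, error) {
+ link, err := LinkByName(name)
+ if _, notFound := err.(LinkNotFoundError); notFound {
+ // The device does not exist; a caller might create it here.
+ return nil, err
+ }
+ return link, err
+}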
+
+// LinkByAlias finds a link by its alias and returns a pointer to the object.
+// If there are multiple links with the alias, it returns the first one.
+func LinkByAlias(alias string) (Link, error) {
+ return pkgHandle.LinkByAlias(alias)
+}
+
+// LinkByAlias finds a link by its alias and returns a pointer to the object.
+// If there are multiple links with the alias, it returns the first one.
+func (h *Handle) LinkByAlias(alias string) (Link, error) {
+ if h.lookupByDump {
+ return h.linkByAliasDump(alias)
+ }
+
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ req.AddData(msg)
+
+ nameData := nl.NewRtAttr(syscall.IFLA_IFALIAS, nl.ZeroTerminated(alias))
+ req.AddData(nameData)
+
+ link, err := execGetLink(req)
+ if err == syscall.EINVAL {
+ // older kernels don't support looking up via IFLA_IFALIAS
+ // so fall back to dumping all links
+ h.lookupByDump = true
+ return h.linkByAliasDump(alias)
+ }
+
+ return link, err
+}
+
+// LinkByIndex finds a link by index and returns a pointer to the object.
+func LinkByIndex(index int) (Link, error) {
+ return pkgHandle.LinkByIndex(index)
+}
+
+// LinkByIndex finds a link by index and returns a pointer to the object.
+func (h *Handle) LinkByIndex(index int) (Link, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(index)
+ req.AddData(msg)
+
+ return execGetLink(req)
+}
+
+func execGetLink(req *nl.NetlinkRequest) (Link, error) {
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ if errno, ok := err.(syscall.Errno); ok {
+ if errno == syscall.ENODEV {
+ return nil, LinkNotFoundError{fmt.Errorf("Link not found")}
+ }
+ }
+ return nil, err
+ }
+
+ switch {
+ case len(msgs) == 0:
+ return nil, LinkNotFoundError{fmt.Errorf("Link not found")}
+
+ case len(msgs) == 1:
+ return LinkDeserialize(nil, msgs[0])
+
+ default:
+ return nil, fmt.Errorf("More than one link found")
+ }
+}
+
+// LinkDeserialize deserializes a raw message received from netlink into
+// a link object.
+func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) {
+ msg := nl.DeserializeIfInfomsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()}
+ if msg.Flags&syscall.IFF_PROMISC != 0 {
+ base.Promisc = 1
+ }
+ var (
+ link Link
+ stats32 []byte
+ stats64 []byte
+ linkType string
+ )
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.IFLA_LINKINFO:
+ infos, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ for _, info := range infos {
+ switch info.Attr.Type {
+ case nl.IFLA_INFO_KIND:
+ linkType = string(info.Value[:len(info.Value)-1])
+ switch linkType {
+ case "dummy":
+ link = &Dummy{}
+ case "ifb":
+ link = &Ifb{}
+ case "bridge":
+ link = &Bridge{}
+ case "vlan":
+ link = &Vlan{}
+ case "veth":
+ link = &Veth{}
+ case "vxlan":
+ link = &Vxlan{}
+ case "bond":
+ link = &Bond{}
+ case "ipvlan":
+ link = &IPVlan{}
+ case "macvlan":
+ link = &Macvlan{}
+ case "macvtap":
+ link = &Macvtap{}
+ case "gretap":
+ link = &Gretap{}
+ case "ipip":
+ link = &Iptun{}
+ case "vti":
+ link = &Vti{}
+ case "vrf":
+ link = &Vrf{}
+ case "gtp":
+ link = &GTP{}
+ default:
+ link = &GenericLink{LinkType: linkType}
+ }
+ case nl.IFLA_INFO_DATA:
+ data, err := nl.ParseRouteAttr(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ switch linkType {
+ case "vlan":
+ parseVlanData(link, data)
+ case "vxlan":
+ parseVxlanData(link, data)
+ case "bond":
+ parseBondData(link, data)
+ case "ipvlan":
+ parseIPVlanData(link, data)
+ case "macvlan":
+ parseMacvlanData(link, data)
+ case "macvtap":
+ parseMacvtapData(link, data)
+ case "gretap":
+ parseGretapData(link, data)
+ case "ipip":
+ parseIptunData(link, data)
+ case "vti":
+ parseVtiData(link, data)
+ case "vrf":
+ parseVrfData(link, data)
+ case "bridge":
+ parseBridgeData(link, data)
+ case "gtp":
+ parseGTPData(link, data)
+ }
+ }
+ }
+ case syscall.IFLA_ADDRESS:
+ var nonzero bool
+ for _, b := range attr.Value {
+ if b != 0 {
+ nonzero = true
+ }
+ }
+ if nonzero {
+ base.HardwareAddr = attr.Value[:]
+ }
+ case syscall.IFLA_IFNAME:
+ base.Name = string(attr.Value[:len(attr.Value)-1])
+ case syscall.IFLA_MTU:
+ base.MTU = int(native.Uint32(attr.Value[0:4]))
+ case syscall.IFLA_LINK:
+ base.ParentIndex = int(native.Uint32(attr.Value[0:4]))
+ case syscall.IFLA_MASTER:
+ base.MasterIndex = int(native.Uint32(attr.Value[0:4]))
+ case syscall.IFLA_TXQLEN:
+ base.TxQLen = int(native.Uint32(attr.Value[0:4]))
+ case syscall.IFLA_IFALIAS:
+ base.Alias = string(attr.Value[:len(attr.Value)-1])
+ case syscall.IFLA_STATS:
+ stats32 = attr.Value[:]
+ case IFLA_STATS64:
+ stats64 = attr.Value[:]
+ case nl.IFLA_XDP:
+ xdp, err := parseLinkXdp(attr.Value[:])
+ if err != nil {
+ return nil, err
+ }
+ base.Xdp = xdp
+ case syscall.IFLA_PROTINFO | syscall.NLA_F_NESTED:
+ if hdr != nil && hdr.Type == syscall.RTM_NEWLINK &&
+ msg.Family == syscall.AF_BRIDGE {
+ attrs, err := nl.ParseRouteAttr(attr.Value[:])
+ if err != nil {
+ return nil, err
+ }
+ base.Protinfo = parseProtinfo(attrs)
+ }
+ case syscall.IFLA_OPERSTATE:
+ base.OperState = LinkOperState(uint8(attr.Value[0]))
+ }
+ }
+
+ if stats64 != nil {
+ base.Statistics = parseLinkStats64(stats64)
+ } else if stats32 != nil {
+ base.Statistics = parseLinkStats32(stats32)
+ }
+
+ // Links that don't have IFLA_INFO_KIND are hardware devices
+ if link == nil {
+ link = &Device{}
+ }
+ *link.Attrs() = base
+
+ return link, nil
+}
+
+// LinkList gets a list of link devices.
+// Equivalent to: `ip link show`
+func LinkList() ([]Link, error) {
+ return pkgHandle.LinkList()
+}
+
+// LinkList gets a list of link devices.
+// Equivalent to: `ip link show`
+func (h *Handle) LinkList() ([]Link, error) {
+ // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need
+ // to get the message ourselves to parse link type.
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Link
+ for _, m := range msgs {
+ link, err := LinkDeserialize(nil, m)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, link)
+ }
+
+ return res, nil
+}
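+
+// Editor's sketch, not part of the vendored source: enumerating all links and
+// collecting the names of those that are administratively up.
+func exampleUpLinkNames() ([]string, error) {
+ links, err := LinkList()
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for _, link := range links {
+ if link.Attrs().Flags&net.FlagUp != 0 {
+ names = append(names, link.Attrs().Name)
+ }
+ }
+ return names, nil
+}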
+
+// LinkUpdate is used to pass information back from LinkSubscribe()
+type LinkUpdate struct {
+ nl.IfInfomsg
+ Header syscall.NlMsghdr
+ Link
+}
+
+// LinkSubscribe takes a chan down which notifications will be sent
+// when links change. Close the 'done' chan to stop subscription.
+func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error {
+ return linkSubscribe(netns.None(), netns.None(), ch, done)
+}
+
+// LinkSubscribeAt works like LinkSubscribe plus it allows the caller
+// to choose the network namespace in which to subscribe (ns).
+func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
+ return linkSubscribe(ns, netns.None(), ch, done)
+}
+
+func linkSubscribe(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
+ s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK)
+ if err != nil {
+ return err
+ }
+ if done != nil {
+ go func() {
+ <-done
+ s.Close()
+ }()
+ }
+ go func() {
+ defer close(ch)
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ return
+ }
+ for _, m := range msgs {
+ ifmsg := nl.DeserializeIfInfomsg(m.Data)
+ link, err := LinkDeserialize(&m.Header, m.Data)
+ if err != nil {
+ return
+ }
+ ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link}
+ }
+ }
+ }()
+
+ return nil
+}
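+
+// Editor's sketch, not part of the vendored source: consuming link events
+// until the caller closes stop; the channel buffer of 16 is arbitrary.
+func exampleWatchLinks(stop <-chan struct{}) error {
+ ch := make(chan LinkUpdate, 16)
+ if err := LinkSubscribe(ch, stop); err != nil {
+ return err
+ }
+ go func() {
+ for update := range ch {
+ // Header.Type distinguishes RTM_NEWLINK from RTM_DELLINK events.
+ _ = update.Attrs().Name
+ }
+ }()
+ return nil
+}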
+
+func LinkSetHairpin(link Link, mode bool) error {
+ return pkgHandle.LinkSetHairpin(link, mode)
+}
+
+func (h *Handle) LinkSetHairpin(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE)
+}
+
+func LinkSetGuard(link Link, mode bool) error {
+ return pkgHandle.LinkSetGuard(link, mode)
+}
+
+func (h *Handle) LinkSetGuard(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
+}
+
+func LinkSetFastLeave(link Link, mode bool) error {
+ return pkgHandle.LinkSetFastLeave(link, mode)
+}
+
+func (h *Handle) LinkSetFastLeave(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE)
+}
+
+func LinkSetLearning(link Link, mode bool) error {
+ return pkgHandle.LinkSetLearning(link, mode)
+}
+
+func (h *Handle) LinkSetLearning(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING)
+}
+
+func LinkSetRootBlock(link Link, mode bool) error {
+ return pkgHandle.LinkSetRootBlock(link, mode)
+}
+
+func (h *Handle) LinkSetRootBlock(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT)
+}
+
+func LinkSetFlood(link Link, mode bool) error {
+ return pkgHandle.LinkSetFlood(link, mode)
+}
+
+func (h *Handle) LinkSetFlood(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
+}
+
+func LinkSetBrProxyArp(link Link, mode bool) error {
+ return pkgHandle.LinkSetBrProxyArp(link, mode)
+}
+
+func (h *Handle) LinkSetBrProxyArp(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP)
+}
+
+func LinkSetBrProxyArpWiFi(link Link, mode bool) error {
+ return pkgHandle.LinkSetBrProxyArpWiFi(link, mode)
+}
+
+func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP_WIFI)
+}
+
+func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil)
+ nl.NewRtAttrChild(br, attr, boolToByte(mode))
+ req.AddData(br)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return err
+ }
+ return nil
+}
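+
+// Editor's sketch, not part of the vendored source: the bridge-port setters
+// above all funnel into setProtinfoAttr; for example, enabling hairpin mode
+// on an assumed bridge port "veth0".
+func exampleEnableHairpin() error {
+ port, err := LinkByName("veth0")
+ if err != nil {
+ return err
+ }
+ return LinkSetHairpin(port, true)
+}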
+
+func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ vlan := link.(*Vlan)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_VLAN_ID:
+ vlan.VlanId = int(native.Uint16(datum.Value[0:2]))
+ }
+ }
+}
+
+func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ vxlan := link.(*Vxlan)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_VXLAN_ID:
+ vxlan.VxlanId = int(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_VXLAN_LINK:
+ vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_VXLAN_LOCAL:
+ vxlan.SrcAddr = net.IP(datum.Value[0:4])
+ case nl.IFLA_VXLAN_LOCAL6:
+ vxlan.SrcAddr = net.IP(datum.Value[0:16])
+ case nl.IFLA_VXLAN_GROUP:
+ vxlan.Group = net.IP(datum.Value[0:4])
+ case nl.IFLA_VXLAN_GROUP6:
+ vxlan.Group = net.IP(datum.Value[0:16])
+ case nl.IFLA_VXLAN_TTL:
+ vxlan.TTL = int(datum.Value[0])
+ case nl.IFLA_VXLAN_TOS:
+ vxlan.TOS = int(datum.Value[0])
+ case nl.IFLA_VXLAN_LEARNING:
+ vxlan.Learning = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_PROXY:
+ vxlan.Proxy = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_RSC:
+ vxlan.RSC = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_L2MISS:
+ vxlan.L2miss = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_L3MISS:
+ vxlan.L3miss = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_UDP_CSUM:
+ vxlan.UDPCSum = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_GBP:
+ vxlan.GBP = true
+ case nl.IFLA_VXLAN_FLOWBASED:
+ vxlan.FlowBased = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_AGEING:
+ vxlan.Age = int(native.Uint32(datum.Value[0:4]))
+ vxlan.NoAge = vxlan.Age == 0
+ case nl.IFLA_VXLAN_LIMIT:
+ vxlan.Limit = int(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_VXLAN_PORT:
+ vxlan.Port = int(ntohs(datum.Value[0:2]))
+ case nl.IFLA_VXLAN_PORT_RANGE:
+ buf := bytes.NewBuffer(datum.Value[0:4])
+ var pr vxlanPortRange
+ if binary.Read(buf, binary.BigEndian, &pr) == nil { // populate only on successful decode
+ vxlan.PortLow = int(pr.Lo)
+ vxlan.PortHigh = int(pr.Hi)
+ }
+ }
+ }
+}
+
+func parseBondData(link Link, data []syscall.NetlinkRouteAttr) {
+ bond := link.(*Bond)
+ for i := range data {
+ switch data[i].Attr.Type {
+ case nl.IFLA_BOND_MODE:
+ bond.Mode = BondMode(data[i].Value[0])
+ case nl.IFLA_BOND_ACTIVE_SLAVE:
+ bond.ActiveSlave = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_MIIMON:
+ bond.Miimon = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_UPDELAY:
+ bond.UpDelay = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_DOWNDELAY:
+ bond.DownDelay = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_USE_CARRIER:
+ bond.UseCarrier = int(data[i].Value[0])
+ case nl.IFLA_BOND_ARP_INTERVAL:
+ bond.ArpInterval = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_ARP_IP_TARGET:
+ // TODO: implement
+ case nl.IFLA_BOND_ARP_VALIDATE:
+ bond.ArpValidate = BondArpValidate(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_ARP_ALL_TARGETS:
+ bond.ArpAllTargets = BondArpAllTargets(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_PRIMARY:
+ bond.Primary = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_PRIMARY_RESELECT:
+ bond.PrimaryReselect = BondPrimaryReselect(data[i].Value[0])
+ case nl.IFLA_BOND_FAIL_OVER_MAC:
+ bond.FailOverMac = BondFailOverMac(data[i].Value[0])
+ case nl.IFLA_BOND_XMIT_HASH_POLICY:
+ bond.XmitHashPolicy = BondXmitHashPolicy(data[i].Value[0])
+ case nl.IFLA_BOND_RESEND_IGMP:
+ bond.ResendIgmp = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_NUM_PEER_NOTIF:
+ bond.NumPeerNotif = int(data[i].Value[0])
+ case nl.IFLA_BOND_ALL_SLAVES_ACTIVE:
+ bond.AllSlavesActive = int(data[i].Value[0])
+ case nl.IFLA_BOND_MIN_LINKS:
+ bond.MinLinks = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_LP_INTERVAL:
+ bond.LpInterval = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_PACKETS_PER_SLAVE:
+ bond.PackersPerSlave = int(native.Uint32(data[i].Value[0:4]))
+ case nl.IFLA_BOND_AD_LACP_RATE:
+ bond.LacpRate = BondLacpRate(data[i].Value[0])
+ case nl.IFLA_BOND_AD_SELECT:
+ bond.AdSelect = BondAdSelect(data[i].Value[0])
+ case nl.IFLA_BOND_AD_INFO:
+ // TODO: implement
+ case nl.IFLA_BOND_AD_ACTOR_SYS_PRIO:
+ bond.AdActorSysPrio = int(native.Uint16(data[i].Value[0:2]))
+ case nl.IFLA_BOND_AD_USER_PORT_KEY:
+ bond.AdUserPortKey = int(native.Uint16(data[i].Value[0:2]))
+ case nl.IFLA_BOND_AD_ACTOR_SYSTEM:
+ bond.AdActorSystem = net.HardwareAddr(data[i].Value[0:6])
+ case nl.IFLA_BOND_TLB_DYNAMIC_LB:
+ bond.TlbDynamicLb = int(data[i].Value[0])
+ }
+ }
+}
+
+func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ ipv := link.(*IPVlan)
+ for _, datum := range data {
+ if datum.Attr.Type == nl.IFLA_IPVLAN_MODE {
+ ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4]))
+ return
+ }
+ }
+}
+
+func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) {
+ macv := link.(*Macvtap)
+ parseMacvlanData(&macv.Macvlan, data)
+}
+
+func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ macv := link.(*Macvlan)
+ for _, datum := range data {
+ if datum.Attr.Type == nl.IFLA_MACVLAN_MODE {
+ switch native.Uint32(datum.Value[0:4]) {
+ case nl.MACVLAN_MODE_PRIVATE:
+ macv.Mode = MACVLAN_MODE_PRIVATE
+ case nl.MACVLAN_MODE_VEPA:
+ macv.Mode = MACVLAN_MODE_VEPA
+ case nl.MACVLAN_MODE_BRIDGE:
+ macv.Mode = MACVLAN_MODE_BRIDGE
+ case nl.MACVLAN_MODE_PASSTHRU:
+ macv.Mode = MACVLAN_MODE_PASSTHRU
+ case nl.MACVLAN_MODE_SOURCE:
+ macv.Mode = MACVLAN_MODE_SOURCE
+ }
+ return
+ }
+ }
+}
+
+// copied from pkg/net_linux.go
+func linkFlags(rawFlags uint32) net.Flags {
+ var f net.Flags
+ if rawFlags&syscall.IFF_UP != 0 {
+ f |= net.FlagUp
+ }
+ if rawFlags&syscall.IFF_BROADCAST != 0 {
+ f |= net.FlagBroadcast
+ }
+ if rawFlags&syscall.IFF_LOOPBACK != 0 {
+ f |= net.FlagLoopback
+ }
+ if rawFlags&syscall.IFF_POINTOPOINT != 0 {
+ f |= net.FlagPointToPoint
+ }
+ if rawFlags&syscall.IFF_MULTICAST != 0 {
+ f |= net.FlagMulticast
+ }
+ return f
+}
+
+func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+ if gretap.FlowBased {
+ // In flow based mode, no other attributes need to be configured
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased))
+ return
+ }
+
+ ip := gretap.Local.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
+ }
+ ip = gretap.Remote.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip))
+ }
+
+ if gretap.IKey != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gretap.IKey))
+ gretap.IFlags |= uint16(nl.GRE_KEY)
+ }
+
+ if gretap.OKey != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gretap.OKey))
+ gretap.OFlags |= uint16(nl.GRE_KEY)
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags))
+
+ if gretap.Link != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link))
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport))
+}
+
+func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
+ gre := link.(*Gretap)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_GRE_IKEY:
+ gre.IKey = ntohl(datum.Value[0:4])
+ case nl.IFLA_GRE_OKEY:
+ gre.OKey = ntohl(datum.Value[0:4])
+ case nl.IFLA_GRE_LOCAL:
+ gre.Local = net.IP(datum.Value[0:4])
+ case nl.IFLA_GRE_REMOTE:
+ gre.Remote = net.IP(datum.Value[0:4])
+ case nl.IFLA_GRE_ENCAP_SPORT:
+ gre.EncapSport = ntohs(datum.Value[0:2])
+ case nl.IFLA_GRE_ENCAP_DPORT:
+ gre.EncapDport = ntohs(datum.Value[0:2])
+ case nl.IFLA_GRE_IFLAGS:
+ gre.IFlags = ntohs(datum.Value[0:2])
+ case nl.IFLA_GRE_OFLAGS:
+ gre.OFlags = ntohs(datum.Value[0:2])
+
+ case nl.IFLA_GRE_TTL:
+ gre.Ttl = uint8(datum.Value[0])
+ case nl.IFLA_GRE_TOS:
+ gre.Tos = uint8(datum.Value[0])
+ case nl.IFLA_GRE_PMTUDISC:
+ gre.PMtuDisc = uint8(datum.Value[0])
+ case nl.IFLA_GRE_ENCAP_TYPE:
+ gre.EncapType = native.Uint16(datum.Value[0:2])
+ case nl.IFLA_GRE_ENCAP_FLAGS:
+ gre.EncapFlags = native.Uint16(datum.Value[0:2])
+ case nl.IFLA_GRE_COLLECT_METADATA:
+ gre.FlowBased = int8(datum.Value[0]) != 0
+ }
+ }
+}
+
+func parseLinkStats32(data []byte) *LinkStatistics {
+ return (*LinkStatistics)((*LinkStatistics32)(unsafe.Pointer(&data[0:SizeofLinkStats32][0])).to64())
+}
+
+func parseLinkStats64(data []byte) *LinkStatistics {
+ return (*LinkStatistics)((*LinkStatistics64)(unsafe.Pointer(&data[0:SizeofLinkStats64][0])))
+}
+
+func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) {
+ attrs := nl.NewRtAttr(nl.IFLA_XDP|syscall.NLA_F_NESTED, nil)
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(xdp.Fd))
+ nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b)
+ native.PutUint32(b, xdp.Flags)
+ nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b)
+ req.AddData(attrs)
+}
+
+func parseLinkXdp(data []byte) (*LinkXdp, error) {
+ attrs, err := nl.ParseRouteAttr(data)
+ if err != nil {
+ return nil, err
+ }
+ xdp := &LinkXdp{}
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.IFLA_XDP_FD:
+ xdp.Fd = int(native.Uint32(attr.Value[0:4]))
+ case nl.IFLA_XDP_ATTACHED:
+ xdp.Attached = attr.Value[0] != 0
+ case nl.IFLA_XDP_FLAGS:
+ xdp.Flags = native.Uint32(attr.Value[0:4])
+ case nl.IFLA_XDP_PROG_ID:
+ xdp.ProgId = native.Uint32(attr.Value[0:4])
+ }
+ }
+ return xdp, nil
+}
+
+func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+ ip := iptun.Local.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip))
+ }
+
+ ip = iptun.Remote.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip))
+ }
+
+ if iptun.Link != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(iptun.Link))
+ }
+ nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc))
+ nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl))
+ nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos))
+}
+
+func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
+ iptun := link.(*Iptun)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_IPTUN_LOCAL:
+ iptun.Local = net.IP(datum.Value[0:4])
+ case nl.IFLA_IPTUN_REMOTE:
+ iptun.Remote = net.IP(datum.Value[0:4])
+ case nl.IFLA_IPTUN_TTL:
+ iptun.Ttl = uint8(datum.Value[0])
+ case nl.IFLA_IPTUN_TOS:
+ iptun.Tos = uint8(datum.Value[0])
+ case nl.IFLA_IPTUN_PMTUDISC:
+ iptun.PMtuDisc = uint8(datum.Value[0])
+ }
+ }
+}
+
+func addVtiAttrs(vti *Vti, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+ ip := vti.Local.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VTI_LOCAL, []byte(ip))
+ }
+
+ ip = vti.Remote.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VTI_REMOTE, []byte(ip))
+ }
+
+ if vti.Link != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VTI_LINK, nl.Uint32Attr(vti.Link))
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_VTI_IKEY, htonl(vti.IKey))
+ nl.NewRtAttrChild(data, nl.IFLA_VTI_OKEY, htonl(vti.OKey))
+}
+
+func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) {
+ vti := link.(*Vti)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_VTI_LOCAL:
+ vti.Local = net.IP(datum.Value[0:4])
+ case nl.IFLA_VTI_REMOTE:
+ vti.Remote = net.IP(datum.Value[0:4])
+ case nl.IFLA_VTI_IKEY:
+ vti.IKey = ntohl(datum.Value[0:4])
+ case nl.IFLA_VTI_OKEY:
+ vti.OKey = ntohl(datum.Value[0:4])
+ }
+ }
+}
+
+func addVrfAttrs(vrf *Vrf, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(vrf.Table))
+ nl.NewRtAttrChild(data, nl.IFLA_VRF_TABLE, b)
+}
+
+func parseVrfData(link Link, data []syscall.NetlinkRouteAttr) {
+ vrf := link.(*Vrf)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_VRF_TABLE:
+ vrf.Table = native.Uint32(datum.Value[0:4])
+ }
+ }
+}
+
+func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ if bridge.MulticastSnooping != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping))
+ }
+ if bridge.HelloTime != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime))
+ }
+}
+
+func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) {
+ br := bridge.(*Bridge)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_BR_HELLO_TIME:
+ helloTime := native.Uint32(datum.Value[0:4])
+ br.HelloTime = &helloTime
+ case nl.IFLA_BR_MCAST_SNOOPING:
+ mcastSnooping := datum.Value[0] == 1
+ br.MulticastSnooping = &mcastSnooping
+ }
+ }
+}
+
+func addGTPAttrs(gtp *GTP, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_GTP_FD0, nl.Uint32Attr(uint32(gtp.FD0)))
+ nl.NewRtAttrChild(data, nl.IFLA_GTP_FD1, nl.Uint32Attr(uint32(gtp.FD1)))
+ nl.NewRtAttrChild(data, nl.IFLA_GTP_PDP_HASHSIZE, nl.Uint32Attr(131072))
+ if gtp.Role != nl.GTP_ROLE_GGSN {
+ nl.NewRtAttrChild(data, nl.IFLA_GTP_ROLE, nl.Uint32Attr(uint32(gtp.Role)))
+ }
+}
+
+func parseGTPData(link Link, data []syscall.NetlinkRouteAttr) {
+ gtp := link.(*GTP)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_GTP_FD0:
+ gtp.FD0 = int(native.Uint32(datum.Value))
+ case nl.IFLA_GTP_FD1:
+ gtp.FD1 = int(native.Uint32(datum.Value))
+ case nl.IFLA_GTP_PDP_HASHSIZE:
+ gtp.PDPHashsize = int(native.Uint32(datum.Value))
+ case nl.IFLA_GTP_ROLE:
+ gtp.Role = int(native.Uint32(datum.Value))
+ }
+ }
+}
diff --git a/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go
new file mode 100644
index 000000000..310bd33d8
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go
@@ -0,0 +1,14 @@
+package netlink
+
+// ideally golang.org/x/sys/unix would define IfReq but it only has
+// IFNAMSIZ, hence this minimalistic implementation
+const (
+ SizeOfIfReq = 40
+ IFNAMSIZ = 16
+)
+
+type ifReq struct {
+ Name [IFNAMSIZ]byte
+ Flags uint16
+ pad [SizeOfIfReq - IFNAMSIZ - 2]byte
+}
diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go
new file mode 100644
index 000000000..0e5eb90c9
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/neigh.go
@@ -0,0 +1,22 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Neigh represents a link layer neighbor from netlink.
+type Neigh struct {
+ LinkIndex int
+ Family int
+ State int
+ Type int
+ Flags int
+ IP net.IP
+ HardwareAddr net.HardwareAddr
+}
+
+// String returns $ip $hwaddr
+func (neigh *Neigh) String() string {
+ return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr)
+}
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
new file mode 100644
index 000000000..f069db257
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -0,0 +1,250 @@
+package netlink
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+const (
+ NDA_UNSPEC = iota
+ NDA_DST
+ NDA_LLADDR
+ NDA_CACHEINFO
+ NDA_PROBES
+ NDA_VLAN
+ NDA_PORT
+ NDA_VNI
+ NDA_IFINDEX
+ NDA_MAX = NDA_IFINDEX
+)
+
+// Neighbor Cache Entry States.
+const (
+ NUD_NONE = 0x00
+ NUD_INCOMPLETE = 0x01
+ NUD_REACHABLE = 0x02
+ NUD_STALE = 0x04
+ NUD_DELAY = 0x08
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+)
+
+// Neighbor Flags
+const (
+ NTF_USE = 0x01
+ NTF_SELF = 0x02
+ NTF_MASTER = 0x04
+ NTF_PROXY = 0x08
+ NTF_ROUTER = 0x80
+)
+
+type Ndmsg struct {
+ Family uint8
+ Index uint32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
+func deserializeNdmsg(b []byte) *Ndmsg {
+ var dummy Ndmsg
+ return (*Ndmsg)(unsafe.Pointer(&b[0:unsafe.Sizeof(dummy)][0]))
+}
+
+func (msg *Ndmsg) Serialize() []byte {
+ return (*(*[unsafe.Sizeof(*msg)]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *Ndmsg) Len() int {
+ return int(unsafe.Sizeof(*msg))
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func NeighAdd(neigh *Neigh) error {
+ return pkgHandle.NeighAdd(neigh)
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func (h *Handle) NeighAdd(neigh *Neigh) error {
+ return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
+}
+
+// NeighSet will add or replace an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh replace....`
+func NeighSet(neigh *Neigh) error {
+ return pkgHandle.NeighSet(neigh)
+}
+
+// NeighSet will add or replace an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh replace....`
+func (h *Handle) NeighSet(neigh *Neigh) error {
+ return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE)
+}
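+
+// Editor's sketch, not part of the vendored source: installing a permanent
+// ARP entry; the interface index, IP, and MAC are illustrative values.
+func exampleStaticArpEntry(linkIndex int) error {
+ hwaddr, err := net.ParseMAC("02:00:00:00:00:02")
+ if err != nil {
+ return err
+ }
+ return NeighSet(&Neigh{
+ LinkIndex: linkIndex,
+ State: NUD_PERMANENT,
+ IP: net.ParseIP("192.0.2.10"),
+ HardwareAddr: hwaddr,
+ })
+}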
+
+// NeighAppend will append an entry to FDB
+// Equivalent to: `bridge fdb append...`
+func NeighAppend(neigh *Neigh) error {
+ return pkgHandle.NeighAppend(neigh)
+}
+
+// NeighAppend will append an entry to FDB
+// Equivalent to: `bridge fdb append...`
+func (h *Handle) NeighAppend(neigh *Neigh) error {
+ return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
+}
+
+// neighAdd adds a neighbor entry on the package handle, applying the
+// netlink flags given in mode.
+func neighAdd(neigh *Neigh, mode int) error {
+ return pkgHandle.neighAdd(neigh, mode)
+}
+
+// neighAdd adds a neighbor entry using the netlink flags given in mode,
+// e.g. NLM_F_CREATE|NLM_F_EXCL to add or NLM_F_CREATE|NLM_F_REPLACE to replace.
+func (h *Handle) neighAdd(neigh *Neigh, mode int) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
+ return neighHandle(neigh, req)
+}
+
+// NeighDel will delete an IP to MAC mapping from the ARP table.
+// Equivalent to: `ip neigh del ...`
+func NeighDel(neigh *Neigh) error {
+ return pkgHandle.NeighDel(neigh)
+}
+
+// NeighDel will delete an IP to MAC mapping from the ARP table.
+// Equivalent to: `ip neigh del ...`
+func (h *Handle) NeighDel(neigh *Neigh) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
+ return neighHandle(neigh, req)
+}
+
+func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
+ var family int
+ if neigh.Family > 0 {
+ family = neigh.Family
+ } else {
+ family = nl.GetIPFamily(neigh.IP)
+ }
+
+ msg := Ndmsg{
+ Family: uint8(family),
+ Index: uint32(neigh.LinkIndex),
+ State: uint16(neigh.State),
+ Type: uint8(neigh.Type),
+ Flags: uint8(neigh.Flags),
+ }
+ req.AddData(&msg)
+
+ ipData := neigh.IP.To4()
+ if ipData == nil {
+ ipData = neigh.IP.To16()
+ }
+
+ dstData := nl.NewRtAttr(NDA_DST, ipData)
+ req.AddData(dstData)
+
+ if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil {
+ hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr))
+ req.AddData(hwData)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+ return pkgHandle.NeighList(linkIndex, family)
+}
+
+// NeighProxyList gets a list of neighbor proxies in the system.
+// Equivalent to: `ip neighbor show proxy`.
+// The list can be filtered by link and ip family.
+func NeighProxyList(linkIndex, family int) ([]Neigh, error) {
+ return pkgHandle.NeighProxyList(linkIndex, family)
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) {
+ return h.neighList(linkIndex, family, 0)
+}
+
+// NeighProxyList gets a list of neighbor proxies in the system.
+// Equivalent to: `ip neighbor show proxy`.
+// The list can be filtered by link, ip family.
+func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) {
+ return h.neighList(linkIndex, family, NTF_PROXY)
+}
+
+func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
+ msg := Ndmsg{
+ Family: uint8(family),
+ Index: uint32(linkIndex),
+ Flags: uint8(flags),
+ }
+ req.AddData(&msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Neigh
+ for _, m := range msgs {
+ ndm := deserializeNdmsg(m)
+ if linkIndex != 0 && int(ndm.Index) != linkIndex {
+ // Ignore messages from other interfaces
+ continue
+ }
+
+ neigh, err := NeighDeserialize(m)
+ if err != nil {
+ continue
+ }
+
+ res = append(res, *neigh)
+ }
+
+ return res, nil
+}
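+
+// Editor's sketch, not part of the vendored source: dumping the IPv4 ARP
+// entries of one interface while skipping failed or incomplete ones.
+func exampleUsableArpEntries(linkIndex int) ([]Neigh, error) {
+ neighs, err := NeighList(linkIndex, FAMILY_V4)
+ if err != nil {
+ return nil, err
+ }
+ var usable []Neigh
+ for _, n := range neighs {
+ if n.State&(NUD_FAILED|NUD_INCOMPLETE) == 0 {
+ usable = append(usable, n)
+ }
+ }
+ return usable, nil
+}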
+
+func NeighDeserialize(m []byte) (*Neigh, error) {
+ msg := deserializeNdmsg(m)
+
+ neigh := Neigh{
+ LinkIndex: int(msg.Index),
+ Family: int(msg.Family),
+ State: int(msg.State),
+ Type: int(msg.Type),
+ Flags: int(msg.Flags),
+ }
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case NDA_DST:
+ neigh.IP = net.IP(attr.Value)
+ case NDA_LLADDR:
+ neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+ }
+ }
+
+ return &neigh, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/netlink.go b/vendor/github.com/vishvananda/netlink/netlink.go
new file mode 100644
index 000000000..fb159526e
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/netlink.go
@@ -0,0 +1,39 @@
+// Package netlink provides a simple library for netlink. Netlink is
+// the interface a user-space program in linux uses to communicate with
+// the kernel. It can be used to add and remove interfaces, set up ip
+// addresses and routes, and configure ipsec. Netlink communication
+// requires elevated privileges, so in most cases this code needs to
+// be run as root. The low level primitives for netlink are contained
+// in the nl subpackage. This package attempts to provide a high-level
+// interface that is loosely modeled on the iproute2 cli.
+package netlink
+
+import (
+ "errors"
+ "net"
+)
+
+var (
+ // ErrNotImplemented is returned when a requested feature is not implemented.
+ ErrNotImplemented = errors.New("not implemented")
+)
+
+// ParseIPNet parses a string in ip/net format and returns a net.IPNet.
+// This is valuable because addresses in netlink are often IPNets and
+// ParseCIDR returns an IPNet with the IP part set to the base IP of the
+// range.
+func ParseIPNet(s string) (*net.IPNet, error) {
+ ip, ipNet, err := net.ParseCIDR(s)
+ if err != nil {
+ return nil, err
+ }
+ return &net.IPNet{IP: ip, Mask: ipNet.Mask}, nil
+}
+
+// NewIPNet generates an IPNet from an ip address using a netmask of 32 or 128.
+func NewIPNet(ip net.IP) *net.IPNet {
+ if ip.To4() != nil {
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(32, 32)}
+ }
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(128, 128)}
+}
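+
+// Editor's sketch, not part of the vendored source: net.ParseCIDR would
+// return the base address 10.0.0.0/24 here, while ParseIPNet preserves the
+// host address 10.0.0.5/24, which is the form netlink addresses use.
+func exampleParseIPNet() (*net.IPNet, error) {
+ return ParseIPNet("10.0.0.5/24")
+}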
diff --git a/vendor/github.com/vishvananda/netlink/netlink_linux.go b/vendor/github.com/vishvananda/netlink/netlink_linux.go
new file mode 100644
index 000000000..a20d293d8
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/netlink_linux.go
@@ -0,0 +1,11 @@
+package netlink
+
+import "github.com/vishvananda/netlink/nl"
+
+// Family type definitions
+const (
+ FAMILY_ALL = nl.FAMILY_ALL
+ FAMILY_V4 = nl.FAMILY_V4
+ FAMILY_V6 = nl.FAMILY_V6
+ FAMILY_MPLS = nl.FAMILY_MPLS
+)
diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
new file mode 100644
index 000000000..2d57c16d7
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -0,0 +1,221 @@
+// +build !linux
+
+package netlink
+
+import "net"
+
+func LinkSetUp(link Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetDown(link Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetMTU(link Link, mtu int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetMaster(link Link, master *Bridge) error {
+ return ErrNotImplemented
+}
+
+func LinkSetNsPid(link Link, nspid int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetNsFd(link Link, fd int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetName(link Link, name string) error {
+ return ErrNotImplemented
+}
+
+func LinkSetAlias(link Link, name string) error {
+ return ErrNotImplemented
+}
+
+func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+ return ErrNotImplemented
+}
+
+func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error {
+ return ErrNotImplemented
+}
+
+func LinkSetVfVlan(link Link, vf, vlan int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetVfTxRate(link Link, vf, rate int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetNoMaster(link Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetMasterByIndex(link Link, masterIndex int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetXdpFd(link Link, fd int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetARPOff(link Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetARPOn(link Link) error {
+ return ErrNotImplemented
+}
+
+func LinkByName(name string) (Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func LinkByAlias(alias string) (Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func LinkByIndex(index int) (Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func LinkSetHairpin(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkSetGuard(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkSetFastLeave(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkSetLearning(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkSetRootBlock(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkSetFlood(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkAdd(link Link) error {
+ return ErrNotImplemented
+}
+
+func LinkDel(link Link) error {
+ return ErrNotImplemented
+}
+
+func SetHairpin(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetGuard(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetFastLeave(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetLearning(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetRootBlock(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetFlood(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkList() ([]Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func AddrAdd(link Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func AddrDel(link Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func AddrList(link Link, family int) ([]Addr, error) {
+ return nil, ErrNotImplemented
+}
+
+func RouteAdd(route *Route) error {
+ return ErrNotImplemented
+}
+
+func RouteDel(route *Route) error {
+ return ErrNotImplemented
+}
+
+func RouteList(link Link, family int) ([]Route, error) {
+ return nil, ErrNotImplemented
+}
+
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+ return ErrNotImplemented
+}
+
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+ return ErrNotImplemented
+}
+
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ return nil, ErrNotImplemented
+}
+
+func XfrmStateAdd(policy *XfrmState) error {
+ return ErrNotImplemented
+}
+
+func XfrmStateDel(policy *XfrmState) error {
+ return ErrNotImplemented
+}
+
+func XfrmStateList(family int) ([]XfrmState, error) {
+ return nil, ErrNotImplemented
+}
+
+func NeighAdd(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighSet(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighAppend(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighDel(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+ return nil, ErrNotImplemented
+}
+
+func NeighDeserialize(m []byte) (*Neigh, error) {
+ return nil, ErrNotImplemented
+}
+
+func SocketGet(local, remote net.Addr) (*Socket, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go
new file mode 100644
index 000000000..fe362e9fa
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go
@@ -0,0 +1,76 @@
+package nl
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type IfAddrmsg struct {
+ syscall.IfAddrmsg
+}
+
+func NewIfAddrmsg(family int) *IfAddrmsg {
+ return &IfAddrmsg{
+ IfAddrmsg: syscall.IfAddrmsg{
+ Family: uint8(family),
+ },
+ }
+}
+
+// struct ifaddrmsg {
+// __u8 ifa_family;
+// __u8 ifa_prefixlen; /* The prefix length */
+// __u8 ifa_flags; /* Flags */
+// __u8 ifa_scope; /* Address scope */
+// __u32 ifa_index; /* Link index */
+// };
+
+// type IfAddrmsg struct {
+// Family uint8
+// Prefixlen uint8
+// Flags uint8
+// Scope uint8
+// Index uint32
+// }
+// SizeofIfAddrmsg = 0x8
+
+func DeserializeIfAddrmsg(b []byte) *IfAddrmsg {
+ return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0]))
+}
+
+func (msg *IfAddrmsg) Serialize() []byte {
+ return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfAddrmsg) Len() int {
+ return syscall.SizeofIfAddrmsg
+}
+
+// struct ifa_cacheinfo {
+// __u32 ifa_prefered;
+// __u32 ifa_valid;
+// __u32 cstamp; /* created timestamp, hundredths of seconds */
+// __u32 tstamp; /* updated timestamp, hundredths of seconds */
+// };
+
+const IFA_CACHEINFO = 6
+const SizeofIfaCacheInfo = 0x10
+
+type IfaCacheInfo struct {
+ IfaPrefered uint32
+ IfaValid uint32
+ Cstamp uint32
+ Tstamp uint32
+}
+
+func (msg *IfaCacheInfo) Len() int {
+ return SizeofIfaCacheInfo
+}
+
+func DeserializeIfaCacheInfo(b []byte) *IfaCacheInfo {
+ return (*IfaCacheInfo)(unsafe.Pointer(&b[0:SizeofIfaCacheInfo][0]))
+}
+
+func (msg *IfaCacheInfo) Serialize() []byte {
+ return (*(*[SizeofIfaCacheInfo]byte)(unsafe.Pointer(msg)))[:]
+}
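+
+// Usage sketch (illustrative): Serialize and DeserializeIfAddrmsg form a
+// zero-copy round trip; both reinterpret the same bytes through
+// unsafe.Pointer instead of encoding field by field.
+//
+//	msg := NewIfAddrmsg(syscall.AF_INET)
+//	msg.Prefixlen = 24
+//	b := msg.Serialize()            // len(b) == syscall.SizeofIfAddrmsg
+//	back := DeserializeIfAddrmsg(b) // back aliases b's memory
+//	// back.Family == syscall.AF_INET, back.Prefixlen == 24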
diff --git a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
new file mode 100644
index 000000000..6c0d33338
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
@@ -0,0 +1,74 @@
+package nl
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+const (
+ SizeofBridgeVlanInfo = 0x04
+)
+
+/* Bridge Flags */
+const (
+ BRIDGE_FLAGS_MASTER = iota /* Bridge command to/from master */
+ BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */
+)
+
+/* Bridge management nested attributes
+ * [IFLA_AF_SPEC] = {
+ * [IFLA_BRIDGE_FLAGS]
+ * [IFLA_BRIDGE_MODE]
+ * [IFLA_BRIDGE_VLAN_INFO]
+ * }
+ */
+const (
+ IFLA_BRIDGE_FLAGS = iota
+ IFLA_BRIDGE_MODE
+ IFLA_BRIDGE_VLAN_INFO
+)
+
+const (
+ BRIDGE_VLAN_INFO_MASTER = 1 << iota
+ BRIDGE_VLAN_INFO_PVID
+ BRIDGE_VLAN_INFO_UNTAGGED
+ BRIDGE_VLAN_INFO_RANGE_BEGIN
+ BRIDGE_VLAN_INFO_RANGE_END
+)
+
+// struct bridge_vlan_info {
+// __u16 flags;
+// __u16 vid;
+// };
+
+type BridgeVlanInfo struct {
+ Flags uint16
+ Vid uint16
+}
+
+func (b *BridgeVlanInfo) Serialize() []byte {
+ return (*(*[SizeofBridgeVlanInfo]byte)(unsafe.Pointer(b)))[:]
+}
+
+func DeserializeBridgeVlanInfo(b []byte) *BridgeVlanInfo {
+ return (*BridgeVlanInfo)(unsafe.Pointer(&b[0:SizeofBridgeVlanInfo][0]))
+}
+
+func (b *BridgeVlanInfo) PortVID() bool {
+ return b.Flags&BRIDGE_VLAN_INFO_PVID > 0
+}
+
+func (b *BridgeVlanInfo) EngressUntag() bool {
+ return b.Flags&BRIDGE_VLAN_INFO_UNTAGGED > 0
+}
+
+func (b *BridgeVlanInfo) String() string {
+ return fmt.Sprintf("%+v", *b)
+}
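+
+// Usage sketch (illustrative): building the nested IFLA_AF_SPEC layout
+// described above with the RtAttr helpers from nl_linux.go. The vid of
+// 10 is arbitrary, and req stands for a *NetlinkRequest built elsewhere.
+//
+//	afSpec := NewRtAttr(IFLA_AF_SPEC, nil)
+//	vlanInfo := BridgeVlanInfo{Flags: BRIDGE_VLAN_INFO_PVID, Vid: 10}
+//	NewRtAttrChild(afSpec, IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+//	req.AddData(afSpec)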
+
+/* New extended info filters for IFLA_EXT_MASK */
+const (
+ RTEXT_FILTER_VF = 1 << iota
+ RTEXT_FILTER_BRVLAN
+ RTEXT_FILTER_BRVLAN_COMPRESSED
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
new file mode 100644
index 000000000..380cc5967
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
@@ -0,0 +1,189 @@
+package nl
+
+import "unsafe"
+
+// Track the message sizes for the correct serialization/deserialization
+const (
+ SizeofNfgenmsg = 4
+ SizeofNfattr = 4
+ SizeofNfConntrack = 376
+ SizeofNfctTupleHead = 52
+)
+
+var L4ProtoMap = map[uint8]string{
+ 6: "tcp",
+ 17: "udp",
+}
+
+// All the following constants come from:
+// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+
+// enum cntl_msg_types {
+// IPCTNL_MSG_CT_NEW,
+// IPCTNL_MSG_CT_GET,
+// IPCTNL_MSG_CT_DELETE,
+// IPCTNL_MSG_CT_GET_CTRZERO,
+// IPCTNL_MSG_CT_GET_STATS_CPU,
+// IPCTNL_MSG_CT_GET_STATS,
+// IPCTNL_MSG_CT_GET_DYING,
+// IPCTNL_MSG_CT_GET_UNCONFIRMED,
+//
+// IPCTNL_MSG_MAX
+// };
+const (
+ IPCTNL_MSG_CT_GET = 1
+ IPCTNL_MSG_CT_DELETE = 2
+)
+
+// #define NFNETLINK_V0 0
+const (
+ NFNETLINK_V0 = 0
+)
+
+// #define NLA_F_NESTED (1 << 15)
+const (
+ NLA_F_NESTED = (1 << 15)
+)
+
+// enum ctattr_type {
+// CTA_UNSPEC,
+// CTA_TUPLE_ORIG,
+// CTA_TUPLE_REPLY,
+// CTA_STATUS,
+// CTA_PROTOINFO,
+// CTA_HELP,
+// CTA_NAT_SRC,
+// #define CTA_NAT CTA_NAT_SRC /* backwards compatibility */
+// CTA_TIMEOUT,
+// CTA_MARK,
+// CTA_COUNTERS_ORIG,
+// CTA_COUNTERS_REPLY,
+// CTA_USE,
+// CTA_ID,
+// CTA_NAT_DST,
+// CTA_TUPLE_MASTER,
+// CTA_SEQ_ADJ_ORIG,
+// CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG,
+// CTA_SEQ_ADJ_REPLY,
+// CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY,
+// CTA_SECMARK, /* obsolete */
+// CTA_ZONE,
+// CTA_SECCTX,
+// CTA_TIMESTAMP,
+// CTA_MARK_MASK,
+// CTA_LABELS,
+// CTA_LABELS_MASK,
+// __CTA_MAX
+// };
+const (
+ CTA_TUPLE_ORIG = 1
+ CTA_TUPLE_REPLY = 2
+ CTA_STATUS = 3
+ CTA_TIMEOUT = 7
+ CTA_MARK = 8
+ CTA_PROTOINFO = 4
+)
+
+// enum ctattr_tuple {
+// CTA_TUPLE_UNSPEC,
+// CTA_TUPLE_IP,
+// CTA_TUPLE_PROTO,
+// CTA_TUPLE_ZONE,
+// __CTA_TUPLE_MAX
+// };
+// #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
+const (
+ CTA_TUPLE_IP = 1
+ CTA_TUPLE_PROTO = 2
+)
+
+// enum ctattr_ip {
+// CTA_IP_UNSPEC,
+// CTA_IP_V4_SRC,
+// CTA_IP_V4_DST,
+// CTA_IP_V6_SRC,
+// CTA_IP_V6_DST,
+// __CTA_IP_MAX
+// };
+// #define CTA_IP_MAX (__CTA_IP_MAX - 1)
+const (
+ CTA_IP_V4_SRC = 1
+ CTA_IP_V4_DST = 2
+ CTA_IP_V6_SRC = 3
+ CTA_IP_V6_DST = 4
+)
+
+// enum ctattr_l4proto {
+// CTA_PROTO_UNSPEC,
+// CTA_PROTO_NUM,
+// CTA_PROTO_SRC_PORT,
+// CTA_PROTO_DST_PORT,
+// CTA_PROTO_ICMP_ID,
+// CTA_PROTO_ICMP_TYPE,
+// CTA_PROTO_ICMP_CODE,
+// CTA_PROTO_ICMPV6_ID,
+// CTA_PROTO_ICMPV6_TYPE,
+// CTA_PROTO_ICMPV6_CODE,
+// __CTA_PROTO_MAX
+// };
+// #define CTA_PROTO_MAX (__CTA_PROTO_MAX - 1)
+const (
+ CTA_PROTO_NUM = 1
+ CTA_PROTO_SRC_PORT = 2
+ CTA_PROTO_DST_PORT = 3
+)
+
+// enum ctattr_protoinfo {
+// CTA_PROTOINFO_UNSPEC,
+// CTA_PROTOINFO_TCP,
+// CTA_PROTOINFO_DCCP,
+// CTA_PROTOINFO_SCTP,
+// __CTA_PROTOINFO_MAX
+// };
+// #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
+const (
+ CTA_PROTOINFO_TCP = 1
+)
+
+// enum ctattr_protoinfo_tcp {
+// CTA_PROTOINFO_TCP_UNSPEC,
+// CTA_PROTOINFO_TCP_STATE,
+// CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+// CTA_PROTOINFO_TCP_WSCALE_REPLY,
+// CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+// CTA_PROTOINFO_TCP_FLAGS_REPLY,
+// __CTA_PROTOINFO_TCP_MAX
+// };
+// #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1)
+const (
+ CTA_PROTOINFO_TCP_STATE = 1
+ CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2
+ CTA_PROTOINFO_TCP_WSCALE_REPLY = 3
+ CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4
+ CTA_PROTOINFO_TCP_FLAGS_REPLY = 5
+)
+
+// /* General form of address family dependent message.
+// */
+// struct nfgenmsg {
+// __u8 nfgen_family; /* AF_xxx */
+// __u8 version; /* nfnetlink version */
+// __be16 res_id; /* resource id */
+// };
+type Nfgenmsg struct {
+ NfgenFamily uint8
+ Version uint8
+ ResId uint16 // big endian
+}
+
+func (msg *Nfgenmsg) Len() int {
+ return SizeofNfgenmsg
+}
+
+func DeserializeNfgenmsg(b []byte) *Nfgenmsg {
+ return (*Nfgenmsg)(unsafe.Pointer(&b[0:SizeofNfgenmsg][0]))
+}
+
+func (msg *Nfgenmsg) Serialize() []byte {
+ return (*(*[SizeofNfgenmsg]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go
new file mode 100644
index 000000000..81b46f2c7
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go
@@ -0,0 +1,89 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const SizeofGenlmsg = 4
+
+const (
+ GENL_ID_CTRL = 0x10
+ GENL_CTRL_VERSION = 2
+ GENL_CTRL_NAME = "nlctrl"
+)
+
+const (
+ GENL_CTRL_CMD_GETFAMILY = 3
+)
+
+const (
+ GENL_CTRL_ATTR_UNSPEC = iota
+ GENL_CTRL_ATTR_FAMILY_ID
+ GENL_CTRL_ATTR_FAMILY_NAME
+ GENL_CTRL_ATTR_VERSION
+ GENL_CTRL_ATTR_HDRSIZE
+ GENL_CTRL_ATTR_MAXATTR
+ GENL_CTRL_ATTR_OPS
+ GENL_CTRL_ATTR_MCAST_GROUPS
+)
+
+const (
+ GENL_CTRL_ATTR_OP_UNSPEC = iota
+ GENL_CTRL_ATTR_OP_ID
+ GENL_CTRL_ATTR_OP_FLAGS
+)
+
+const (
+ GENL_ADMIN_PERM = 1 << iota
+ GENL_CMD_CAP_DO
+ GENL_CMD_CAP_DUMP
+ GENL_CMD_CAP_HASPOL
+)
+
+const (
+ GENL_CTRL_ATTR_MCAST_GRP_UNSPEC = iota
+ GENL_CTRL_ATTR_MCAST_GRP_NAME
+ GENL_CTRL_ATTR_MCAST_GRP_ID
+)
+
+const (
+ GENL_GTP_VERSION = 0
+ GENL_GTP_NAME = "gtp"
+)
+
+const (
+ GENL_GTP_CMD_NEWPDP = iota
+ GENL_GTP_CMD_DELPDP
+ GENL_GTP_CMD_GETPDP
+)
+
+const (
+ GENL_GTP_ATTR_UNSPEC = iota
+ GENL_GTP_ATTR_LINK
+ GENL_GTP_ATTR_VERSION
+ GENL_GTP_ATTR_TID
+ GENL_GTP_ATTR_PEER_ADDRESS
+ GENL_GTP_ATTR_MS_ADDRESS
+ GENL_GTP_ATTR_FLOW
+ GENL_GTP_ATTR_NET_NS_FD
+ GENL_GTP_ATTR_I_TEI
+ GENL_GTP_ATTR_O_TEI
+ GENL_GTP_ATTR_PAD
+)
+
+type Genlmsg struct {
+ Command uint8
+ Version uint8
+}
+
+func (msg *Genlmsg) Len() int {
+ return SizeofGenlmsg
+}
+
+func DeserializeGenlmsg(b []byte) *Genlmsg {
+ return (*Genlmsg)(unsafe.Pointer(&b[0:SizeofGenlmsg][0]))
+}
+
+func (msg *Genlmsg) Serialize() []byte {
+ return (*(*[SizeofGenlmsg]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
new file mode 100644
index 000000000..a492246d8
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
@@ -0,0 +1,525 @@
+package nl
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ DEFAULT_CHANGE = 0xFFFFFFFF
+ // doesn't exist in syscall
+ IFLA_VFINFO_LIST = syscall.IFLA_IFALIAS + 1 + iota
+ IFLA_STATS64
+ IFLA_VF_PORTS
+ IFLA_PORT_SELF
+ IFLA_AF_SPEC
+ IFLA_GROUP
+ IFLA_NET_NS_FD
+ IFLA_EXT_MASK
+ IFLA_PROMISCUITY
+ IFLA_NUM_TX_QUEUES
+ IFLA_NUM_RX_QUEUES
+ IFLA_CARRIER
+ IFLA_PHYS_PORT_ID
+ IFLA_CARRIER_CHANGES
+ IFLA_PHYS_SWITCH_ID
+ IFLA_LINK_NETNSID
+ IFLA_PHYS_PORT_NAME
+ IFLA_PROTO_DOWN
+ IFLA_GSO_MAX_SEGS
+ IFLA_GSO_MAX_SIZE
+ IFLA_PAD
+ IFLA_XDP
+)
+
+const (
+ IFLA_INFO_UNSPEC = iota
+ IFLA_INFO_KIND
+ IFLA_INFO_DATA
+ IFLA_INFO_XSTATS
+ IFLA_INFO_MAX = IFLA_INFO_XSTATS
+)
+
+const (
+ IFLA_VLAN_UNSPEC = iota
+ IFLA_VLAN_ID
+ IFLA_VLAN_FLAGS
+ IFLA_VLAN_EGRESS_QOS
+ IFLA_VLAN_INGRESS_QOS
+ IFLA_VLAN_PROTOCOL
+ IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL
+)
+
+const (
+ VETH_INFO_UNSPEC = iota
+ VETH_INFO_PEER
+ VETH_INFO_MAX = VETH_INFO_PEER
+)
+
+const (
+ IFLA_VXLAN_UNSPEC = iota
+ IFLA_VXLAN_ID
+ IFLA_VXLAN_GROUP
+ IFLA_VXLAN_LINK
+ IFLA_VXLAN_LOCAL
+ IFLA_VXLAN_TTL
+ IFLA_VXLAN_TOS
+ IFLA_VXLAN_LEARNING
+ IFLA_VXLAN_AGEING
+ IFLA_VXLAN_LIMIT
+ IFLA_VXLAN_PORT_RANGE
+ IFLA_VXLAN_PROXY
+ IFLA_VXLAN_RSC
+ IFLA_VXLAN_L2MISS
+ IFLA_VXLAN_L3MISS
+ IFLA_VXLAN_PORT
+ IFLA_VXLAN_GROUP6
+ IFLA_VXLAN_LOCAL6
+ IFLA_VXLAN_UDP_CSUM
+ IFLA_VXLAN_UDP_ZERO_CSUM6_TX
+ IFLA_VXLAN_UDP_ZERO_CSUM6_RX
+ IFLA_VXLAN_REMCSUM_TX
+ IFLA_VXLAN_REMCSUM_RX
+ IFLA_VXLAN_GBP
+ IFLA_VXLAN_REMCSUM_NOPARTIAL
+ IFLA_VXLAN_FLOWBASED
+ IFLA_VXLAN_MAX = IFLA_VXLAN_FLOWBASED
+)
+
+const (
+ BRIDGE_MODE_UNSPEC = iota
+ BRIDGE_MODE_HAIRPIN
+)
+
+const (
+ IFLA_BRPORT_UNSPEC = iota
+ IFLA_BRPORT_STATE
+ IFLA_BRPORT_PRIORITY
+ IFLA_BRPORT_COST
+ IFLA_BRPORT_MODE
+ IFLA_BRPORT_GUARD
+ IFLA_BRPORT_PROTECT
+ IFLA_BRPORT_FAST_LEAVE
+ IFLA_BRPORT_LEARNING
+ IFLA_BRPORT_UNICAST_FLOOD
+ IFLA_BRPORT_PROXYARP
+ IFLA_BRPORT_LEARNING_SYNC
+ IFLA_BRPORT_PROXYARP_WIFI
+ IFLA_BRPORT_MAX = IFLA_BRPORT_PROXYARP_WIFI
+)
+
+const (
+ IFLA_IPVLAN_UNSPEC = iota
+ IFLA_IPVLAN_MODE
+ IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE
+)
+
+const (
+ IFLA_MACVLAN_UNSPEC = iota
+ IFLA_MACVLAN_MODE
+ IFLA_MACVLAN_FLAGS
+ IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS
+)
+
+const (
+ MACVLAN_MODE_PRIVATE = 1
+ MACVLAN_MODE_VEPA = 2
+ MACVLAN_MODE_BRIDGE = 4
+ MACVLAN_MODE_PASSTHRU = 8
+ MACVLAN_MODE_SOURCE = 16
+)
+
+const (
+ IFLA_BOND_UNSPEC = iota
+ IFLA_BOND_MODE
+ IFLA_BOND_ACTIVE_SLAVE
+ IFLA_BOND_MIIMON
+ IFLA_BOND_UPDELAY
+ IFLA_BOND_DOWNDELAY
+ IFLA_BOND_USE_CARRIER
+ IFLA_BOND_ARP_INTERVAL
+ IFLA_BOND_ARP_IP_TARGET
+ IFLA_BOND_ARP_VALIDATE
+ IFLA_BOND_ARP_ALL_TARGETS
+ IFLA_BOND_PRIMARY
+ IFLA_BOND_PRIMARY_RESELECT
+ IFLA_BOND_FAIL_OVER_MAC
+ IFLA_BOND_XMIT_HASH_POLICY
+ IFLA_BOND_RESEND_IGMP
+ IFLA_BOND_NUM_PEER_NOTIF
+ IFLA_BOND_ALL_SLAVES_ACTIVE
+ IFLA_BOND_MIN_LINKS
+ IFLA_BOND_LP_INTERVAL
+ IFLA_BOND_PACKETS_PER_SLAVE
+ IFLA_BOND_AD_LACP_RATE
+ IFLA_BOND_AD_SELECT
+ IFLA_BOND_AD_INFO
+ IFLA_BOND_AD_ACTOR_SYS_PRIO
+ IFLA_BOND_AD_USER_PORT_KEY
+ IFLA_BOND_AD_ACTOR_SYSTEM
+ IFLA_BOND_TLB_DYNAMIC_LB
+)
+
+const (
+ IFLA_BOND_AD_INFO_UNSPEC = iota
+ IFLA_BOND_AD_INFO_AGGREGATOR
+ IFLA_BOND_AD_INFO_NUM_PORTS
+ IFLA_BOND_AD_INFO_ACTOR_KEY
+ IFLA_BOND_AD_INFO_PARTNER_KEY
+ IFLA_BOND_AD_INFO_PARTNER_MAC
+)
+
+const (
+ IFLA_BOND_SLAVE_UNSPEC = iota
+ IFLA_BOND_SLAVE_STATE
+ IFLA_BOND_SLAVE_MII_STATUS
+ IFLA_BOND_SLAVE_LINK_FAILURE_COUNT
+ IFLA_BOND_SLAVE_PERM_HWADDR
+ IFLA_BOND_SLAVE_QUEUE_ID
+ IFLA_BOND_SLAVE_AD_AGGREGATOR_ID
+)
+
+const (
+ IFLA_GRE_UNSPEC = iota
+ IFLA_GRE_LINK
+ IFLA_GRE_IFLAGS
+ IFLA_GRE_OFLAGS
+ IFLA_GRE_IKEY
+ IFLA_GRE_OKEY
+ IFLA_GRE_LOCAL
+ IFLA_GRE_REMOTE
+ IFLA_GRE_TTL
+ IFLA_GRE_TOS
+ IFLA_GRE_PMTUDISC
+ IFLA_GRE_ENCAP_LIMIT
+ IFLA_GRE_FLOWINFO
+ IFLA_GRE_FLAGS
+ IFLA_GRE_ENCAP_TYPE
+ IFLA_GRE_ENCAP_FLAGS
+ IFLA_GRE_ENCAP_SPORT
+ IFLA_GRE_ENCAP_DPORT
+ IFLA_GRE_COLLECT_METADATA
+ IFLA_GRE_MAX = IFLA_GRE_COLLECT_METADATA
+)
+
+const (
+ GRE_CSUM = 0x8000
+ GRE_ROUTING = 0x4000
+ GRE_KEY = 0x2000
+ GRE_SEQ = 0x1000
+ GRE_STRICT = 0x0800
+ GRE_REC = 0x0700
+ GRE_FLAGS = 0x00F8
+ GRE_VERSION = 0x0007
+)
+
+const (
+ IFLA_VF_INFO_UNSPEC = iota
+ IFLA_VF_INFO
+ IFLA_VF_INFO_MAX = IFLA_VF_INFO
+)
+
+const (
+ IFLA_VF_UNSPEC = iota
+ IFLA_VF_MAC /* Hardware queue specific attributes */
+ IFLA_VF_VLAN
+ IFLA_VF_TX_RATE /* Max TX Bandwidth Allocation */
+ IFLA_VF_SPOOFCHK /* Spoof Checking on/off switch */
+ IFLA_VF_LINK_STATE /* link state enable/disable/auto switch */
+ IFLA_VF_RATE /* Min and Max TX Bandwidth Allocation */
+ IFLA_VF_RSS_QUERY_EN /* RSS Redirection Table and Hash Key query
+ * on/off switch
+ */
+ IFLA_VF_STATS /* network device statistics */
+ IFLA_VF_MAX = IFLA_VF_STATS
+)
+
+const (
+ IFLA_VF_LINK_STATE_AUTO = iota /* link state of the uplink */
+ IFLA_VF_LINK_STATE_ENABLE /* link always up */
+ IFLA_VF_LINK_STATE_DISABLE /* link always down */
+ IFLA_VF_LINK_STATE_MAX = IFLA_VF_LINK_STATE_DISABLE
+)
+
+const (
+ IFLA_VF_STATS_RX_PACKETS = iota
+ IFLA_VF_STATS_TX_PACKETS
+ IFLA_VF_STATS_RX_BYTES
+ IFLA_VF_STATS_TX_BYTES
+ IFLA_VF_STATS_BROADCAST
+ IFLA_VF_STATS_MULTICAST
+ IFLA_VF_STATS_MAX = IFLA_VF_STATS_MULTICAST
+)
+
+const (
+ SizeofVfMac = 0x24
+ SizeofVfVlan = 0x0c
+ SizeofVfTxRate = 0x08
+ SizeofVfRate = 0x0c
+ SizeofVfSpoofchk = 0x08
+ SizeofVfLinkState = 0x08
+ SizeofVfRssQueryEn = 0x08
+)
+
+// struct ifla_vf_mac {
+// __u32 vf;
+// __u8 mac[32]; /* MAX_ADDR_LEN */
+// };
+
+type VfMac struct {
+ Vf uint32
+ Mac [32]byte
+}
+
+func (msg *VfMac) Len() int {
+ return SizeofVfMac
+}
+
+func DeserializeVfMac(b []byte) *VfMac {
+ return (*VfMac)(unsafe.Pointer(&b[0:SizeofVfMac][0]))
+}
+
+func (msg *VfMac) Serialize() []byte {
+ return (*(*[SizeofVfMac]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct ifla_vf_vlan {
+// __u32 vf;
+// __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+// __u32 qos;
+// };
+
+type VfVlan struct {
+ Vf uint32
+ Vlan uint32
+ Qos uint32
+}
+
+func (msg *VfVlan) Len() int {
+ return SizeofVfVlan
+}
+
+func DeserializeVfVlan(b []byte) *VfVlan {
+ return (*VfVlan)(unsafe.Pointer(&b[0:SizeofVfVlan][0]))
+}
+
+func (msg *VfVlan) Serialize() []byte {
+ return (*(*[SizeofVfVlan]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct ifla_vf_tx_rate {
+// __u32 vf;
+// __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
+// };
+
+type VfTxRate struct {
+ Vf uint32
+ Rate uint32
+}
+
+func (msg *VfTxRate) Len() int {
+ return SizeofVfTxRate
+}
+
+func DeserializeVfTxRate(b []byte) *VfTxRate {
+ return (*VfTxRate)(unsafe.Pointer(&b[0:SizeofVfTxRate][0]))
+}
+
+func (msg *VfTxRate) Serialize() []byte {
+ return (*(*[SizeofVfTxRate]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct ifla_vf_rate {
+// __u32 vf;
+// __u32 min_tx_rate; /* Min Bandwidth in Mbps */
+// __u32 max_tx_rate; /* Max Bandwidth in Mbps */
+// };
+
+type VfRate struct {
+ Vf uint32
+ MinTxRate uint32
+ MaxTxRate uint32
+}
+
+func (msg *VfRate) Len() int {
+ return SizeofVfRate
+}
+
+func DeserializeVfRate(b []byte) *VfRate {
+ return (*VfRate)(unsafe.Pointer(&b[0:SizeofVfRate][0]))
+}
+
+func (msg *VfRate) Serialize() []byte {
+ return (*(*[SizeofVfRate]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct ifla_vf_spoofchk {
+// __u32 vf;
+// __u32 setting;
+// };
+
+type VfSpoofchk struct {
+ Vf uint32
+ Setting uint32
+}
+
+func (msg *VfSpoofchk) Len() int {
+ return SizeofVfSpoofchk
+}
+
+func DeserializeVfSpoofchk(b []byte) *VfSpoofchk {
+ return (*VfSpoofchk)(unsafe.Pointer(&b[0:SizeofVfSpoofchk][0]))
+}
+
+func (msg *VfSpoofchk) Serialize() []byte {
+ return (*(*[SizeofVfSpoofchk]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct ifla_vf_link_state {
+// __u32 vf;
+// __u32 link_state;
+// };
+
+type VfLinkState struct {
+ Vf uint32
+ LinkState uint32
+}
+
+func (msg *VfLinkState) Len() int {
+ return SizeofVfLinkState
+}
+
+func DeserializeVfLinkState(b []byte) *VfLinkState {
+ return (*VfLinkState)(unsafe.Pointer(&b[0:SizeofVfLinkState][0]))
+}
+
+func (msg *VfLinkState) Serialize() []byte {
+ return (*(*[SizeofVfLinkState]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct ifla_vf_rss_query_en {
+// __u32 vf;
+// __u32 setting;
+// };
+
+type VfRssQueryEn struct {
+ Vf uint32
+ Setting uint32
+}
+
+func (msg *VfRssQueryEn) Len() int {
+ return SizeofVfRssQueryEn
+}
+
+func DeserializeVfRssQueryEn(b []byte) *VfRssQueryEn {
+ return (*VfRssQueryEn)(unsafe.Pointer(&b[0:SizeofVfRssQueryEn][0]))
+}
+
+func (msg *VfRssQueryEn) Serialize() []byte {
+ return (*(*[SizeofVfRssQueryEn]byte)(unsafe.Pointer(msg)))[:]
+}
+
+const (
+ IFLA_XDP_UNSPEC = iota
+ IFLA_XDP_FD /* fd of xdp program to attach, or -1 to remove */
+ IFLA_XDP_ATTACHED /* read-only bool indicating if prog is attached */
+ IFLA_XDP_FLAGS /* xdp prog related flags */
+ IFLA_XDP_PROG_ID /* xdp prog id */
+ IFLA_XDP_MAX = IFLA_XDP_PROG_ID
+)
+
+const (
+ IFLA_IPTUN_UNSPEC = iota
+ IFLA_IPTUN_LINK
+ IFLA_IPTUN_LOCAL
+ IFLA_IPTUN_REMOTE
+ IFLA_IPTUN_TTL
+ IFLA_IPTUN_TOS
+ IFLA_IPTUN_ENCAP_LIMIT
+ IFLA_IPTUN_FLOWINFO
+ IFLA_IPTUN_FLAGS
+ IFLA_IPTUN_PROTO
+ IFLA_IPTUN_PMTUDISC
+ IFLA_IPTUN_6RD_PREFIX
+ IFLA_IPTUN_6RD_RELAY_PREFIX
+ IFLA_IPTUN_6RD_PREFIXLEN
+ IFLA_IPTUN_6RD_RELAY_PREFIXLEN
+ IFLA_IPTUN_MAX = IFLA_IPTUN_6RD_RELAY_PREFIXLEN
+)
+
+const (
+ IFLA_VTI_UNSPEC = iota
+ IFLA_VTI_LINK
+ IFLA_VTI_IKEY
+ IFLA_VTI_OKEY
+ IFLA_VTI_LOCAL
+ IFLA_VTI_REMOTE
+ IFLA_VTI_MAX = IFLA_VTI_REMOTE
+)
+
+const (
+ IFLA_VRF_UNSPEC = iota
+ IFLA_VRF_TABLE
+)
+
+const (
+ IFLA_BR_UNSPEC = iota
+ IFLA_BR_FORWARD_DELAY
+ IFLA_BR_HELLO_TIME
+ IFLA_BR_MAX_AGE
+ IFLA_BR_AGEING_TIME
+ IFLA_BR_STP_STATE
+ IFLA_BR_PRIORITY
+ IFLA_BR_VLAN_FILTERING
+ IFLA_BR_VLAN_PROTOCOL
+ IFLA_BR_GROUP_FWD_MASK
+ IFLA_BR_ROOT_ID
+ IFLA_BR_BRIDGE_ID
+ IFLA_BR_ROOT_PORT
+ IFLA_BR_ROOT_PATH_COST
+ IFLA_BR_TOPOLOGY_CHANGE
+ IFLA_BR_TOPOLOGY_CHANGE_DETECTED
+ IFLA_BR_HELLO_TIMER
+ IFLA_BR_TCN_TIMER
+ IFLA_BR_TOPOLOGY_CHANGE_TIMER
+ IFLA_BR_GC_TIMER
+ IFLA_BR_GROUP_ADDR
+ IFLA_BR_FDB_FLUSH
+ IFLA_BR_MCAST_ROUTER
+ IFLA_BR_MCAST_SNOOPING
+ IFLA_BR_MCAST_QUERY_USE_IFADDR
+ IFLA_BR_MCAST_QUERIER
+ IFLA_BR_MCAST_HASH_ELASTICITY
+ IFLA_BR_MCAST_HASH_MAX
+ IFLA_BR_MCAST_LAST_MEMBER_CNT
+ IFLA_BR_MCAST_STARTUP_QUERY_CNT
+ IFLA_BR_MCAST_LAST_MEMBER_INTVL
+ IFLA_BR_MCAST_MEMBERSHIP_INTVL
+ IFLA_BR_MCAST_QUERIER_INTVL
+ IFLA_BR_MCAST_QUERY_INTVL
+ IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
+ IFLA_BR_MCAST_STARTUP_QUERY_INTVL
+ IFLA_BR_NF_CALL_IPTABLES
+ IFLA_BR_NF_CALL_IP6TABLES
+ IFLA_BR_NF_CALL_ARPTABLES
+ IFLA_BR_VLAN_DEFAULT_PVID
+ IFLA_BR_PAD
+ IFLA_BR_VLAN_STATS_ENABLED
+ IFLA_BR_MCAST_STATS_ENABLED
+ IFLA_BR_MCAST_IGMP_VERSION
+ IFLA_BR_MCAST_MLD_VERSION
+ IFLA_BR_MAX = IFLA_BR_MCAST_MLD_VERSION
+)
+
+const (
+ IFLA_GTP_UNSPEC = iota
+ IFLA_GTP_FD0
+ IFLA_GTP_FD1
+ IFLA_GTP_PDP_HASHSIZE
+ IFLA_GTP_ROLE
+)
+
+const (
+ GTP_ROLE_GGSN = iota
+ GTP_ROLE_SGSN
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go b/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go
new file mode 100644
index 000000000..3915b7eec
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go
@@ -0,0 +1,36 @@
+package nl
+
+import "encoding/binary"
+
+const (
+ MPLS_LS_LABEL_SHIFT = 12
+ MPLS_LS_S_SHIFT = 8
+)
+
+func EncodeMPLSStack(labels ...int) []byte {
+ b := make([]byte, 4*len(labels))
+ for idx, label := range labels {
+ l := label << MPLS_LS_LABEL_SHIFT
+ if idx == len(labels)-1 {
+ l |= 1 << MPLS_LS_S_SHIFT
+ }
+ binary.BigEndian.PutUint32(b[idx*4:], uint32(l))
+ }
+ return b
+}
+
+func DecodeMPLSStack(buf []byte) []int {
+ if len(buf)%4 != 0 {
+ return nil
+ }
+ stack := make([]int, 0, len(buf)/4)
+ for len(buf) > 0 {
+ l := binary.BigEndian.Uint32(buf[:4])
+ buf = buf[4:]
+ stack = append(stack, int(l)>>MPLS_LS_LABEL_SHIFT)
+ if (l>>MPLS_LS_S_SHIFT)&1 > 0 {
+ break
+ }
+ }
+ return stack
+}
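+
+// Worked example (illustrative): a two-label stack. Each label occupies
+// the top 20 bits of a big-endian 32-bit word; the bottom-of-stack (S)
+// bit is set only on the last entry, which is why DecodeMPLSStack stops
+// there.
+//
+//	b := EncodeMPLSStack(100, 200) // 8 bytes, S bit set in the 2nd word
+//	labels := DecodeMPLSStack(b)   // []int{100, 200}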
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
new file mode 100644
index 000000000..1329acd86
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -0,0 +1,718 @@
+// Package nl has low level primitives for making Netlink calls.
+package nl
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+
+ "github.com/vishvananda/netns"
+)
+
+const (
+ // Family type definitions
+ FAMILY_ALL = syscall.AF_UNSPEC
+ FAMILY_V4 = syscall.AF_INET
+ FAMILY_V6 = syscall.AF_INET6
+ FAMILY_MPLS = AF_MPLS
+)
+
+// SupportedNlFamilies contains the list of netlink families this netlink package supports
+var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM, syscall.NETLINK_NETFILTER}
+
+var nextSeqNr uint32
+
+// GetIPFamily returns the family type of a net.IP.
+func GetIPFamily(ip net.IP) int {
+ if len(ip) <= net.IPv4len {
+ return FAMILY_V4
+ }
+ if ip.To4() != nil {
+ return FAMILY_V4
+ }
+ return FAMILY_V6
+}
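+
+// Worked example (illustrative): net.ParseIP returns 16-byte slices for
+// both families, so the To4() check does the real work here.
+//
+//	GetIPFamily(net.ParseIP("10.0.0.1"))    // FAMILY_V4 (To4() != nil)
+//	GetIPFamily(net.ParseIP("2001:db8::1")) // FAMILY_V6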
+
+var nativeEndian binary.ByteOrder
+
+// Get native endianness for the system
+func NativeEndian() binary.ByteOrder {
+ if nativeEndian == nil {
+ var x uint32 = 0x01020304
+ if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+ nativeEndian = binary.BigEndian
+ } else {
+ nativeEndian = binary.LittleEndian
+ }
+ }
+ return nativeEndian
+}
+
+// Byte swap a 16 bit value if we aren't big endian
+func Swap16(i uint16) uint16 {
+ if NativeEndian() == binary.BigEndian {
+ return i
+ }
+ return (i&0xff00)>>8 | (i&0xff)<<8
+}
+
+// Byte swap a 32 bit value if we aren't big endian
+func Swap32(i uint32) uint32 {
+ if NativeEndian() == binary.BigEndian {
+ return i
+ }
+ return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24
+}
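+
+// Worked example (illustrative): on a little-endian host,
+//
+//	NativeEndian()     // binary.LittleEndian
+//	Swap16(0x1234)     // 0x3412
+//	Swap32(0x01020304) // 0x04030201
+//
+// On a big-endian host both Swap functions return their input unchanged.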
+
+type NetlinkRequestData interface {
+ Len() int
+ Serialize() []byte
+}
+
+// IfInfomsg is related to links, but it is used for list requests as well
+type IfInfomsg struct {
+ syscall.IfInfomsg
+}
+
+// Create an IfInfomsg with family specified
+func NewIfInfomsg(family int) *IfInfomsg {
+ return &IfInfomsg{
+ IfInfomsg: syscall.IfInfomsg{
+ Family: uint8(family),
+ },
+ }
+}
+
+func DeserializeIfInfomsg(b []byte) *IfInfomsg {
+ return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0]))
+}
+
+func (msg *IfInfomsg) Serialize() []byte {
+ return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfInfomsg) Len() int {
+ return syscall.SizeofIfInfomsg
+}
+
+func (msg *IfInfomsg) EncapType() string {
+ switch msg.Type {
+ case 0:
+ return "generic"
+ case syscall.ARPHRD_ETHER:
+ return "ether"
+ case syscall.ARPHRD_EETHER:
+ return "eether"
+ case syscall.ARPHRD_AX25:
+ return "ax25"
+ case syscall.ARPHRD_PRONET:
+ return "pronet"
+ case syscall.ARPHRD_CHAOS:
+ return "chaos"
+ case syscall.ARPHRD_IEEE802:
+ return "ieee802"
+ case syscall.ARPHRD_ARCNET:
+ return "arcnet"
+ case syscall.ARPHRD_APPLETLK:
+ return "atalk"
+ case syscall.ARPHRD_DLCI:
+ return "dlci"
+ case syscall.ARPHRD_ATM:
+ return "atm"
+ case syscall.ARPHRD_METRICOM:
+ return "metricom"
+ case syscall.ARPHRD_IEEE1394:
+ return "ieee1394"
+ case syscall.ARPHRD_INFINIBAND:
+ return "infiniband"
+ case syscall.ARPHRD_SLIP:
+ return "slip"
+ case syscall.ARPHRD_CSLIP:
+ return "cslip"
+ case syscall.ARPHRD_SLIP6:
+ return "slip6"
+ case syscall.ARPHRD_CSLIP6:
+ return "cslip6"
+ case syscall.ARPHRD_RSRVD:
+ return "rsrvd"
+ case syscall.ARPHRD_ADAPT:
+ return "adapt"
+ case syscall.ARPHRD_ROSE:
+ return "rose"
+ case syscall.ARPHRD_X25:
+ return "x25"
+ case syscall.ARPHRD_HWX25:
+ return "hwx25"
+ case syscall.ARPHRD_PPP:
+ return "ppp"
+ case syscall.ARPHRD_HDLC:
+ return "hdlc"
+ case syscall.ARPHRD_LAPB:
+ return "lapb"
+ case syscall.ARPHRD_DDCMP:
+ return "ddcmp"
+ case syscall.ARPHRD_RAWHDLC:
+ return "rawhdlc"
+ case syscall.ARPHRD_TUNNEL:
+ return "ipip"
+ case syscall.ARPHRD_TUNNEL6:
+ return "tunnel6"
+ case syscall.ARPHRD_FRAD:
+ return "frad"
+ case syscall.ARPHRD_SKIP:
+ return "skip"
+ case syscall.ARPHRD_LOOPBACK:
+ return "loopback"
+ case syscall.ARPHRD_LOCALTLK:
+ return "ltalk"
+ case syscall.ARPHRD_FDDI:
+ return "fddi"
+ case syscall.ARPHRD_BIF:
+ return "bif"
+ case syscall.ARPHRD_SIT:
+ return "sit"
+ case syscall.ARPHRD_IPDDP:
+ return "ip/ddp"
+ case syscall.ARPHRD_IPGRE:
+ return "gre"
+ case syscall.ARPHRD_PIMREG:
+ return "pimreg"
+ case syscall.ARPHRD_HIPPI:
+ return "hippi"
+ case syscall.ARPHRD_ASH:
+ return "ash"
+ case syscall.ARPHRD_ECONET:
+ return "econet"
+ case syscall.ARPHRD_IRDA:
+ return "irda"
+ case syscall.ARPHRD_FCPP:
+ return "fcpp"
+ case syscall.ARPHRD_FCAL:
+ return "fcal"
+ case syscall.ARPHRD_FCPL:
+ return "fcpl"
+ case syscall.ARPHRD_FCFABRIC:
+ return "fcfb0"
+ case syscall.ARPHRD_FCFABRIC + 1:
+ return "fcfb1"
+ case syscall.ARPHRD_FCFABRIC + 2:
+ return "fcfb2"
+ case syscall.ARPHRD_FCFABRIC + 3:
+ return "fcfb3"
+ case syscall.ARPHRD_FCFABRIC + 4:
+ return "fcfb4"
+ case syscall.ARPHRD_FCFABRIC + 5:
+ return "fcfb5"
+ case syscall.ARPHRD_FCFABRIC + 6:
+ return "fcfb6"
+ case syscall.ARPHRD_FCFABRIC + 7:
+ return "fcfb7"
+ case syscall.ARPHRD_FCFABRIC + 8:
+ return "fcfb8"
+ case syscall.ARPHRD_FCFABRIC + 9:
+ return "fcfb9"
+ case syscall.ARPHRD_FCFABRIC + 10:
+ return "fcfb10"
+ case syscall.ARPHRD_FCFABRIC + 11:
+ return "fcfb11"
+ case syscall.ARPHRD_FCFABRIC + 12:
+ return "fcfb12"
+ case syscall.ARPHRD_IEEE802_TR:
+ return "tr"
+ case syscall.ARPHRD_IEEE80211:
+ return "ieee802.11"
+ case syscall.ARPHRD_IEEE80211_PRISM:
+ return "ieee802.11/prism"
+ case syscall.ARPHRD_IEEE80211_RADIOTAP:
+ return "ieee802.11/radiotap"
+ case syscall.ARPHRD_IEEE802154:
+ return "ieee802.15.4"
+
+ case 65534:
+ return "none"
+ case 65535:
+ return "void"
+ }
+ return fmt.Sprintf("unknown%d", msg.Type)
+}
+
+func rtaAlignOf(attrlen int) int {
+ return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1)
+}
+
+func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg {
+ msg := NewIfInfomsg(family)
+ parent.children = append(parent.children, msg)
+ return msg
+}
+
+// Extend RtAttr to handle data and children
+type RtAttr struct {
+ syscall.RtAttr
+ Data []byte
+ children []NetlinkRequestData
+}
+
+// Create a new Extended RtAttr object
+func NewRtAttr(attrType int, data []byte) *RtAttr {
+ return &RtAttr{
+ RtAttr: syscall.RtAttr{
+ Type: uint16(attrType),
+ },
+ children: []NetlinkRequestData{},
+ Data: data,
+ }
+}
+
+// Create a new RtAttr obj anc add it as a child of an existing object
+func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
+ attr := NewRtAttr(attrType, data)
+ parent.children = append(parent.children, attr)
+ return attr
+}
+
+func (a *RtAttr) Len() int {
+ if len(a.children) == 0 {
+ return (syscall.SizeofRtAttr + len(a.Data))
+ }
+
+ l := 0
+ for _, child := range a.children {
+ l += rtaAlignOf(child.Len())
+ }
+ l += syscall.SizeofRtAttr
+ return rtaAlignOf(l + len(a.Data))
+}
+
+// Serialize the RtAttr into a byte array
+// This can't just unsafe.cast because it must iterate through children.
+func (a *RtAttr) Serialize() []byte {
+ native := NativeEndian()
+
+ length := a.Len()
+ buf := make([]byte, rtaAlignOf(length))
+
+ next := 4
+ if a.Data != nil {
+ copy(buf[next:], a.Data)
+ next += rtaAlignOf(len(a.Data))
+ }
+ if len(a.children) > 0 {
+ for _, child := range a.children {
+ childBuf := child.Serialize()
+ copy(buf[next:], childBuf)
+ next += rtaAlignOf(len(childBuf))
+ }
+ }
+
+ if l := uint16(length); l != 0 {
+ native.PutUint16(buf[0:2], l)
+ }
+ native.PutUint16(buf[2:4], a.Type)
+ return buf
+}
+
+type NetlinkRequest struct {
+ syscall.NlMsghdr
+ Data []NetlinkRequestData
+ RawData []byte
+ Sockets map[int]*SocketHandle
+}
+
+// Serialize the Netlink Request into a byte array
+func (req *NetlinkRequest) Serialize() []byte {
+ length := syscall.SizeofNlMsghdr
+ dataBytes := make([][]byte, len(req.Data))
+ for i, data := range req.Data {
+ dataBytes[i] = data.Serialize()
+ length = length + len(dataBytes[i])
+ }
+ length += len(req.RawData)
+
+ req.Len = uint32(length)
+ b := make([]byte, length)
+ hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:]
+ next := syscall.SizeofNlMsghdr
+ copy(b[0:next], hdr)
+ for _, data := range dataBytes {
+ for _, dataByte := range data {
+ b[next] = dataByte
+ next = next + 1
+ }
+ }
+ // Add the raw data if any
+ if len(req.RawData) > 0 {
+ copy(b[next:length], req.RawData)
+ }
+ return b
+}
+
+func (req *NetlinkRequest) AddData(data NetlinkRequestData) {
+ if data != nil {
+ req.Data = append(req.Data, data)
+ }
+}
+
+// AddRawData adds raw bytes to the end of the NetlinkRequest object during serialization
+func (req *NetlinkRequest) AddRawData(data []byte) {
+ if data != nil {
+ req.RawData = append(req.RawData, data...)
+ }
+}
+
+// Execute the request against the given sockType.
+// Returns a list of netlink messages in serialized format, optionally filtered
+// by resType.
+func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
+ var (
+ s *NetlinkSocket
+ err error
+ )
+
+ if req.Sockets != nil {
+ if sh, ok := req.Sockets[sockType]; ok {
+ s = sh.Socket
+ req.Seq = atomic.AddUint32(&sh.Seq, 1)
+ }
+ }
+ sharedSocket := s != nil
+
+ if s == nil {
+ s, err = getNetlinkSocket(sockType)
+ if err != nil {
+ return nil, err
+ }
+ defer s.Close()
+ } else {
+ s.Lock()
+ defer s.Unlock()
+ }
+
+ if err := s.Send(req); err != nil {
+ return nil, err
+ }
+
+ pid, err := s.GetPid()
+ if err != nil {
+ return nil, err
+ }
+
+ var res [][]byte
+
+done:
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ return nil, err
+ }
+ for _, m := range msgs {
+ if m.Header.Seq != req.Seq {
+ if sharedSocket {
+ continue
+ }
+ return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
+ }
+ if m.Header.Pid != pid {
+ return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
+ }
+ if m.Header.Type == syscall.NLMSG_DONE {
+ break done
+ }
+ if m.Header.Type == syscall.NLMSG_ERROR {
+ native := NativeEndian()
+ error := int32(native.Uint32(m.Data[0:4]))
+ if error == 0 {
+ break done
+ }
+ return nil, syscall.Errno(-error)
+ }
+ if resType != 0 && m.Header.Type != resType {
+ continue
+ }
+ res = append(res, m.Data)
+ if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
+ break done
+ }
+ }
+ }
+ return res, nil
+}
+
+// Create a new netlink request from proto and flags
+// Note that the Len value will be inaccurate once data is added, until
+// the message is serialized.
+func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
+ return &NetlinkRequest{
+ NlMsghdr: syscall.NlMsghdr{
+ Len: uint32(syscall.SizeofNlMsghdr),
+ Type: uint16(proto),
+ Flags: syscall.NLM_F_REQUEST | uint16(flags),
+ Seq: atomic.AddUint32(&nextSeqNr, 1),
+ },
+ }
+}
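+
+// Usage sketch (illustrative): a minimal RTM_GETLINK dump using the same
+// request/Execute pattern as the neighList code earlier in this patch.
+// Assumes a Linux host; error handling is elided.
+//
+//	req := NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+//	req.AddData(NewIfInfomsg(syscall.AF_UNSPEC))
+//	msgs, _ := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+//	for _, m := range msgs {
+//		ifi := DeserializeIfInfomsg(m)
+//		attrs, _ := ParseRouteAttr(m[syscall.SizeofIfInfomsg:])
+//		// ifi carries flags/type; attrs carry name, MTU, etc.
+//	}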
+
+type NetlinkSocket struct {
+ fd int32
+ lsa syscall.SockaddrNetlink
+ sync.Mutex
+}
+
+func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
+ fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW|syscall.SOCK_CLOEXEC, protocol)
+ if err != nil {
+ return nil, err
+ }
+ s := &NetlinkSocket{
+ fd: int32(fd),
+ }
+ s.lsa.Family = syscall.AF_NETLINK
+ if err := syscall.Bind(fd, &s.lsa); err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// GetNetlinkSocketAt opens a netlink socket in the network namespace newNs
+// and positions the thread back into the network namespace specified by curNs,
+// when done. If curNs is closed, the function derives the current namespace and
+// moves back into it when done. If newNs is closed, the socket will be opened
+// in the current network namespace.
+func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSocket, error) {
+ c, err := executeInNetns(newNs, curNs)
+ if err != nil {
+ return nil, err
+ }
+ defer c()
+ return getNetlinkSocket(protocol)
+}
+
+// executeInNetns sets execution of the code following this call to the
+// network namespace newNs, then moves the thread back to curNs if open,
+// otherwise to the current netns at the time the function was invoked.
+// In case of success, the caller is expected to execute the returned function
+// at the end of the code that needs to be executed in the network namespace.
+// Example:
+// func jobAt(...) error {
+// d, err := executeInNetns(...)
+// if err != nil { return err}
+// defer d()
+// < code which needs to be executed in specific netns>
+// }
+// TODO: this function probably belongs in the netns pkg.
+func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) {
+ var (
+ err error
+ moveBack func(netns.NsHandle) error
+ closeNs func() error
+ unlockThd func()
+ )
+ restore := func() {
+ // order matters
+ if moveBack != nil {
+ moveBack(curNs)
+ }
+ if closeNs != nil {
+ closeNs()
+ }
+ if unlockThd != nil {
+ unlockThd()
+ }
+ }
+ if newNs.IsOpen() {
+ runtime.LockOSThread()
+ unlockThd = runtime.UnlockOSThread
+ if !curNs.IsOpen() {
+ if curNs, err = netns.Get(); err != nil {
+ restore()
+ return nil, fmt.Errorf("could not get current namespace while creating netlink socket: %v", err)
+ }
+ closeNs = curNs.Close
+ }
+ if err := netns.Set(newNs); err != nil {
+ restore()
+ return nil, fmt.Errorf("failed to set into network namespace %d while creating netlink socket: %v", newNs, err)
+ }
+ moveBack = netns.Set
+ }
+ return restore, nil
+}
+
+// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE)
+// and subscribe it to the multicast groups passed in the variable argument list.
+// Returns the netlink socket on which the Receive() method can be called
+// to retrieve the messages from the kernel.
+func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
+ fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+ if err != nil {
+ return nil, err
+ }
+ s := &NetlinkSocket{
+ fd: int32(fd),
+ }
+ s.lsa.Family = syscall.AF_NETLINK
+
+ for _, g := range groups {
+ s.lsa.Groups |= (1 << (g - 1))
+ }
+
+ if err := syscall.Bind(fd, &s.lsa); err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+
+ return s, nil
+}
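+
+// Usage sketch (illustrative): listening for link notifications. Group
+// numbering is 1-based here (Groups |= 1 << (g - 1)), so passing 1
+// selects RTNLGRP_LINK.
+//
+//	s, _ := Subscribe(syscall.NETLINK_ROUTE, 1 /* RTNLGRP_LINK */)
+//	for {
+//		msgs, _ := s.Receive()
+//		for _, m := range msgs {
+//			// m.Header.Type is RTM_NEWLINK, RTM_DELLINK, ...
+//		}
+//	}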
+
+// SubscribeAt works like Subscribe plus lets the caller choose the network
+// namespace in which the socket would be opened (newNs). Then control goes back
+// to curNs if open, otherwise to the netns at the time this function was called.
+func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*NetlinkSocket, error) {
+ c, err := executeInNetns(newNs, curNs)
+ if err != nil {
+ return nil, err
+ }
+ defer c()
+ return Subscribe(protocol, groups...)
+}
+
+func (s *NetlinkSocket) Close() {
+ fd := int(atomic.SwapInt32(&s.fd, -1))
+ syscall.Close(fd)
+}
+
+func (s *NetlinkSocket) GetFd() int {
+ return int(atomic.LoadInt32(&s.fd))
+}
+
+func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
+ fd := int(atomic.LoadInt32(&s.fd))
+ if fd < 0 {
+ return fmt.Errorf("Send called on a closed socket")
+ }
+ if err := syscall.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
+ fd := int(atomic.LoadInt32(&s.fd))
+ if fd < 0 {
+ return nil, fmt.Errorf("Receive called on a closed socket")
+ }
+ rb := make([]byte, syscall.Getpagesize())
+ nr, _, err := syscall.Recvfrom(fd, rb, 0)
+ if err != nil {
+ return nil, err
+ }
+ if nr < syscall.NLMSG_HDRLEN {
+ return nil, fmt.Errorf("Got short response from netlink")
+ }
+ rb = rb[:nr]
+ return syscall.ParseNetlinkMessage(rb)
+}
+
+func (s *NetlinkSocket) GetPid() (uint32, error) {
+ fd := int(atomic.LoadInt32(&s.fd))
+ lsa, err := syscall.Getsockname(fd)
+ if err != nil {
+ return 0, err
+ }
+ switch v := lsa.(type) {
+ case *syscall.SockaddrNetlink:
+ return v.Pid, nil
+ }
+ return 0, fmt.Errorf("Wrong socket type")
+}
+
+func ZeroTerminated(s string) []byte {
+ bytes := make([]byte, len(s)+1)
+ for i := 0; i < len(s); i++ {
+ bytes[i] = s[i]
+ }
+ bytes[len(s)] = 0
+ return bytes
+}
+
+func NonZeroTerminated(s string) []byte {
+ bytes := make([]byte, len(s))
+ for i := 0; i < len(s); i++ {
+ bytes[i] = s[i]
+ }
+ return bytes
+}
+
+func BytesToString(b []byte) string {
+ n := bytes.Index(b, []byte{0})
+ return string(b[:n])
+}
+
+func Uint8Attr(v uint8) []byte {
+ return []byte{byte(v)}
+}
+
+func Uint16Attr(v uint16) []byte {
+ native := NativeEndian()
+ bytes := make([]byte, 2)
+ native.PutUint16(bytes, v)
+ return bytes
+}
+
+func Uint32Attr(v uint32) []byte {
+ native := NativeEndian()
+ bytes := make([]byte, 4)
+ native.PutUint32(bytes, v)
+ return bytes
+}
+
+func Uint64Attr(v uint64) []byte {
+ native := NativeEndian()
+ bytes := make([]byte, 8)
+ native.PutUint64(bytes, v)
+ return bytes
+}
+
+func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) {
+ var attrs []syscall.NetlinkRouteAttr
+ for len(b) >= syscall.SizeofRtAttr {
+ a, vbuf, alen, err := netlinkRouteAttrAndValue(b)
+ if err != nil {
+ return nil, err
+ }
+ ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]}
+ attrs = append(attrs, ra)
+ b = b[alen:]
+ }
+ return attrs, nil
+}
+
+func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) {
+ a := (*syscall.RtAttr)(unsafe.Pointer(&b[0]))
+ if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) {
+ return nil, nil, 0, syscall.EINVAL
+ }
+ return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil
+}
+
+// SocketHandle contains the netlink socket and the associated
+// sequence counter for a specific netlink family
+type SocketHandle struct {
+ Seq uint32
+ Socket *NetlinkSocket
+}
+
+// Close closes the netlink socket
+func (sh *SocketHandle) Close() {
+ if sh.Socket != nil {
+ sh.Socket.Close()
+ }
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go b/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go
new file mode 100644
index 000000000..dfc0be660
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package nl
+
+import "encoding/binary"
+
+var SupportedNlFamilies = []int{}
+
+func NativeEndian() binary.ByteOrder {
+ return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go
new file mode 100644
index 000000000..1a064d65d
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go
@@ -0,0 +1,80 @@
+package nl
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type RtMsg struct {
+ syscall.RtMsg
+}
+
+func NewRtMsg() *RtMsg {
+ return &RtMsg{
+ RtMsg: syscall.RtMsg{
+ Table: syscall.RT_TABLE_MAIN,
+ Scope: syscall.RT_SCOPE_UNIVERSE,
+ Protocol: syscall.RTPROT_BOOT,
+ Type: syscall.RTN_UNICAST,
+ },
+ }
+}
+
+func NewRtDelMsg() *RtMsg {
+ return &RtMsg{
+ RtMsg: syscall.RtMsg{
+ Table: syscall.RT_TABLE_MAIN,
+ Scope: syscall.RT_SCOPE_NOWHERE,
+ },
+ }
+}
+
+func (msg *RtMsg) Len() int {
+ return syscall.SizeofRtMsg
+}
+
+func DeserializeRtMsg(b []byte) *RtMsg {
+ return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0]))
+}
+
+func (msg *RtMsg) Serialize() []byte {
+ return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+type RtNexthop struct {
+ syscall.RtNexthop
+ Children []NetlinkRequestData
+}
+
+func DeserializeRtNexthop(b []byte) *RtNexthop {
+ return (*RtNexthop)(unsafe.Pointer(&b[0:syscall.SizeofRtNexthop][0]))
+}
+
+func (msg *RtNexthop) Len() int {
+ if len(msg.Children) == 0 {
+ return syscall.SizeofRtNexthop
+ }
+
+ l := 0
+ for _, child := range msg.Children {
+ l += rtaAlignOf(child.Len())
+ }
+ l += syscall.SizeofRtNexthop
+ return rtaAlignOf(l)
+}
+
+func (msg *RtNexthop) Serialize() []byte {
+ length := msg.Len()
+ msg.RtNexthop.Len = uint16(length)
+ buf := make([]byte, length)
+ copy(buf, (*(*[syscall.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:])
+ next := rtaAlignOf(syscall.SizeofRtNexthop)
+ if len(msg.Children) > 0 {
+ for _, child := range msg.Children {
+ childBuf := child.Serialize()
+ copy(buf[next:], childBuf)
+ next += rtaAlignOf(len(childBuf))
+ }
+ }
+ return buf
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go
new file mode 100644
index 000000000..3473e5363
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go
@@ -0,0 +1,68 @@
+package nl
+
+// The syscall package lacks the rule attribute types,
+// so they are defined below.
+const (
+ FRA_UNSPEC = iota
+ FRA_DST /* destination address */
+ FRA_SRC /* source address */
+ FRA_IIFNAME /* interface name */
+ FRA_GOTO /* target to jump to (FR_ACT_GOTO) */
+ FRA_UNUSED2
+ FRA_PRIORITY /* priority/preference */
+ FRA_UNUSED3
+ FRA_UNUSED4
+ FRA_UNUSED5
+ FRA_FWMARK /* mark */
+ FRA_FLOW /* flow/class id */
+ FRA_TUN_ID
+ FRA_SUPPRESS_IFGROUP
+ FRA_SUPPRESS_PREFIXLEN
+ FRA_TABLE /* Extended table id */
+ FRA_FWMASK /* mask for netfilter mark */
+ FRA_OIFNAME
+)
+
+// ip rule netlink request types
+const (
+ FR_ACT_UNSPEC = iota
+ FR_ACT_TO_TBL /* Pass to fixed table */
+ FR_ACT_GOTO /* Jump to another rule */
+ FR_ACT_NOP /* No operation */
+ FR_ACT_RES3
+ FR_ACT_RES4
+ FR_ACT_BLACKHOLE /* Drop without notification */
+ FR_ACT_UNREACHABLE /* Drop with ENETUNREACH */
+ FR_ACT_PROHIBIT /* Drop with EACCES */
+)
+
+// socket diags related
+const (
+ SOCK_DIAG_BY_FAMILY = 20 /* linux.sock_diag.h */
+ TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/
+)
+
+const (
+ AF_MPLS = 28
+)
+
+const (
+ RTA_NEWDST = 0x13
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+)
+
+// RTA_ENCAP subtype
+const (
+ MPLS_IPTUNNEL_UNSPEC = iota
+ MPLS_IPTUNNEL_DST
+)
+
+// light weight tunnel encap types
+const (
+ LWTUNNEL_ENCAP_NONE = iota
+ LWTUNNEL_ENCAP_MPLS
+ LWTUNNEL_ENCAP_IP
+ LWTUNNEL_ENCAP_ILA
+ LWTUNNEL_ENCAP_IP6
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
new file mode 100644
index 000000000..e91fb21c5
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
@@ -0,0 +1,675 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+// LinkLayer
+const (
+ LINKLAYER_UNSPEC = iota
+ LINKLAYER_ETHERNET
+ LINKLAYER_ATM
+)
+
+// ATM
+const (
+ ATM_CELL_PAYLOAD = 48
+ ATM_CELL_SIZE = 53
+)
+
+const TC_LINKLAYER_MASK = 0x0F
+
+// Police
+const (
+ TCA_POLICE_UNSPEC = iota
+ TCA_POLICE_TBF
+ TCA_POLICE_RATE
+ TCA_POLICE_PEAKRATE
+ TCA_POLICE_AVRATE
+ TCA_POLICE_RESULT
+ TCA_POLICE_MAX = TCA_POLICE_RESULT
+)
+
+// Message types
+const (
+ TCA_UNSPEC = iota
+ TCA_KIND
+ TCA_OPTIONS
+ TCA_STATS
+ TCA_XSTATS
+ TCA_RATE
+ TCA_FCNT
+ TCA_STATS2
+ TCA_STAB
+ TCA_MAX = TCA_STAB
+)
+
+const (
+ TCA_ACT_TAB = 1
+ TCAA_MAX = 1
+)
+
+const (
+ TCA_ACT_UNSPEC = iota
+ TCA_ACT_KIND
+ TCA_ACT_OPTIONS
+ TCA_ACT_INDEX
+ TCA_ACT_STATS
+ TCA_ACT_MAX
+)
+
+const (
+ TCA_PRIO_UNSPEC = iota
+ TCA_PRIO_MQ
+ TCA_PRIO_MAX = TCA_PRIO_MQ
+)
+
+const (
+ SizeofTcMsg = 0x14
+ SizeofTcActionMsg = 0x04
+ SizeofTcPrioMap = 0x14
+ SizeofTcRateSpec = 0x0c
+ SizeofTcNetemQopt = 0x18
+ SizeofTcNetemCorr = 0x0c
+ SizeofTcNetemReorder = 0x08
+ SizeofTcNetemCorrupt = 0x08
+ SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c
+ SizeofTcHtbCopt = 2*SizeofTcRateSpec + 0x14
+ SizeofTcHtbGlob = 0x14
+ SizeofTcU32Key = 0x10
+ SizeofTcU32Sel = 0x10 // without keys
+ SizeofTcGen = 0x14
+ SizeofTcMirred = SizeofTcGen + 0x08
+ SizeofTcPolice = 2*SizeofTcRateSpec + 0x20
+)
+
+// struct tcmsg {
+// unsigned char tcm_family;
+// unsigned char tcm__pad1;
+// unsigned short tcm__pad2;
+// int tcm_ifindex;
+// __u32 tcm_handle;
+// __u32 tcm_parent;
+// __u32 tcm_info;
+// };
+
+type TcMsg struct {
+ Family uint8
+ Pad [3]byte
+ Ifindex int32
+ Handle uint32
+ Parent uint32
+ Info uint32
+}
+
+func (msg *TcMsg) Len() int {
+ return SizeofTcMsg
+}
+
+func DeserializeTcMsg(b []byte) *TcMsg {
+ return (*TcMsg)(unsafe.Pointer(&b[0:SizeofTcMsg][0]))
+}
+
+func (x *TcMsg) Serialize() []byte {
+ return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tcamsg {
+// unsigned char tca_family;
+// unsigned char tca__pad1;
+// unsigned short tca__pad2;
+// };
+
+type TcActionMsg struct {
+ Family uint8
+ Pad [3]byte
+}
+
+func (msg *TcActionMsg) Len() int {
+ return SizeofTcActionMsg
+}
+
+func DeserializeTcActionMsg(b []byte) *TcActionMsg {
+ return (*TcActionMsg)(unsafe.Pointer(&b[0:SizeofTcActionMsg][0]))
+}
+
+func (x *TcActionMsg) Serialize() []byte {
+ return (*(*[SizeofTcActionMsg]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TC_PRIO_MAX = 15
+)
+
+// struct tc_prio_qopt {
+// int bands; /* Number of bands */
+// __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
+// };
+
+type TcPrioMap struct {
+ Bands int32
+ Priomap [TC_PRIO_MAX + 1]uint8
+}
+
+func (msg *TcPrioMap) Len() int {
+ return SizeofTcPrioMap
+}
+
+func DeserializeTcPrioMap(b []byte) *TcPrioMap {
+ return (*TcPrioMap)(unsafe.Pointer(&b[0:SizeofTcPrioMap][0]))
+}
+
+func (x *TcPrioMap) Serialize() []byte {
+ return (*(*[SizeofTcPrioMap]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TCA_TBF_UNSPEC = iota
+ TCA_TBF_PARMS
+ TCA_TBF_RTAB
+ TCA_TBF_PTAB
+ TCA_TBF_RATE64
+ TCA_TBF_PRATE64
+ TCA_TBF_BURST
+ TCA_TBF_PBURST
+ TCA_TBF_MAX = TCA_TBF_PBURST
+)
+
+// struct tc_ratespec {
+// unsigned char cell_log;
+// __u8 linklayer; /* lower 4 bits */
+// unsigned short overhead;
+// short cell_align;
+// unsigned short mpu;
+// __u32 rate;
+// };
+
+type TcRateSpec struct {
+ CellLog uint8
+ Linklayer uint8
+ Overhead uint16
+ CellAlign int16
+ Mpu uint16
+ Rate uint32
+}
+
+func (msg *TcRateSpec) Len() int {
+ return SizeofTcRateSpec
+}
+
+func DeserializeTcRateSpec(b []byte) *TcRateSpec {
+ return (*TcRateSpec)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0]))
+}
+
+func (x *TcRateSpec) Serialize() []byte {
+ return (*(*[SizeofTcRateSpec]byte)(unsafe.Pointer(x)))[:]
+}
+
+// NETEM
+
+const (
+ TCA_NETEM_UNSPEC = iota
+ TCA_NETEM_CORR
+ TCA_NETEM_DELAY_DIST
+ TCA_NETEM_REORDER
+ TCA_NETEM_CORRUPT
+ TCA_NETEM_LOSS
+ TCA_NETEM_RATE
+ TCA_NETEM_ECN
+ TCA_NETEM_RATE64
+ TCA_NETEM_MAX = TCA_NETEM_RATE64
+)
+
+// struct tc_netem_qopt {
+// __u32 latency; /* added delay (us) */
+// __u32 limit; /* fifo limit (packets) */
+// __u32 loss; /* random packet loss (0=none ~0=100%) */
+// __u32 gap; /* re-ordering gap (0 for none) */
+// __u32 duplicate; /* random packet dup (0=none ~0=100%) */
+// __u32 jitter; /* random jitter in latency (us) */
+// };
+
+type TcNetemQopt struct {
+ Latency uint32
+ Limit uint32
+ Loss uint32
+ Gap uint32
+ Duplicate uint32
+ Jitter uint32
+}
+
+func (msg *TcNetemQopt) Len() int {
+ return SizeofTcNetemQopt
+}
+
+func DeserializeTcNetemQopt(b []byte) *TcNetemQopt {
+ return (*TcNetemQopt)(unsafe.Pointer(&b[0:SizeofTcNetemQopt][0]))
+}
+
+func (x *TcNetemQopt) Serialize() []byte {
+ return (*(*[SizeofTcNetemQopt]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_netem_corr {
+// __u32 delay_corr; /* delay correlation */
+// __u32 loss_corr; /* packet loss correlation */
+// __u32 dup_corr; /* duplicate correlation */
+// };
+
+type TcNetemCorr struct {
+ DelayCorr uint32
+ LossCorr uint32
+ DupCorr uint32
+}
+
+func (msg *TcNetemCorr) Len() int {
+ return SizeofTcNetemCorr
+}
+
+func DeserializeTcNetemCorr(b []byte) *TcNetemCorr {
+ return (*TcNetemCorr)(unsafe.Pointer(&b[0:SizeofTcNetemCorr][0]))
+}
+
+func (x *TcNetemCorr) Serialize() []byte {
+ return (*(*[SizeofTcNetemCorr]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_netem_reorder {
+// __u32 probability;
+// __u32 correlation;
+// };
+
+type TcNetemReorder struct {
+ Probability uint32
+ Correlation uint32
+}
+
+func (msg *TcNetemReorder) Len() int {
+ return SizeofTcNetemReorder
+}
+
+func DeserializeTcNetemReorder(b []byte) *TcNetemReorder {
+ return (*TcNetemReorder)(unsafe.Pointer(&b[0:SizeofTcNetemReorder][0]))
+}
+
+func (x *TcNetemReorder) Serialize() []byte {
+ return (*(*[SizeofTcNetemReorder]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_netem_corrupt {
+// __u32 probability;
+// __u32 correlation;
+// };
+
+type TcNetemCorrupt struct {
+ Probability uint32
+ Correlation uint32
+}
+
+func (msg *TcNetemCorrupt) Len() int {
+ return SizeofTcNetemCorrupt
+}
+
+func DeserializeTcNetemCorrupt(b []byte) *TcNetemCorrupt {
+ return (*TcNetemCorrupt)(unsafe.Pointer(&b[0:SizeofTcNetemCorrupt][0]))
+}
+
+func (x *TcNetemCorrupt) Serialize() []byte {
+ return (*(*[SizeofTcNetemCorrupt]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_tbf_qopt {
+// struct tc_ratespec rate;
+// struct tc_ratespec peakrate;
+// __u32 limit;
+// __u32 buffer;
+// __u32 mtu;
+// };
+
+type TcTbfQopt struct {
+ Rate TcRateSpec
+ Peakrate TcRateSpec
+ Limit uint32
+ Buffer uint32
+ Mtu uint32
+}
+
+func (msg *TcTbfQopt) Len() int {
+ return SizeofTcTbfQopt
+}
+
+func DeserializeTcTbfQopt(b []byte) *TcTbfQopt {
+ return (*TcTbfQopt)(unsafe.Pointer(&b[0:SizeofTcTbfQopt][0]))
+}
+
+func (x *TcTbfQopt) Serialize() []byte {
+ return (*(*[SizeofTcTbfQopt]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TCA_HTB_UNSPEC = iota
+ TCA_HTB_PARMS
+ TCA_HTB_INIT
+ TCA_HTB_CTAB
+ TCA_HTB_RTAB
+ TCA_HTB_DIRECT_QLEN
+ TCA_HTB_RATE64
+ TCA_HTB_CEIL64
+ TCA_HTB_MAX = TCA_HTB_CEIL64
+)
+
+//struct tc_htb_opt {
+// struct tc_ratespec rate;
+// struct tc_ratespec ceil;
+// __u32 buffer;
+// __u32 cbuffer;
+// __u32 quantum;
+// __u32 level; /* out only */
+// __u32 prio;
+//};
+
+type TcHtbCopt struct {
+ Rate TcRateSpec
+ Ceil TcRateSpec
+ Buffer uint32
+ Cbuffer uint32
+ Quantum uint32
+ Level uint32
+ Prio uint32
+}
+
+func (msg *TcHtbCopt) Len() int {
+ return SizeofTcHtbCopt
+}
+
+func DeserializeTcHtbCopt(b []byte) *TcHtbCopt {
+ return (*TcHtbCopt)(unsafe.Pointer(&b[0:SizeofTcHtbCopt][0]))
+}
+
+func (x *TcHtbCopt) Serialize() []byte {
+ return (*(*[SizeofTcHtbCopt]byte)(unsafe.Pointer(x)))[:]
+}
+
+type TcHtbGlob struct {
+ Version uint32
+ Rate2Quantum uint32
+ Defcls uint32
+ Debug uint32
+ DirectPkts uint32
+}
+
+func (msg *TcHtbGlob) Len() int {
+ return SizeofTcHtbGlob
+}
+
+func DeserializeTcHtbGlob(b []byte) *TcHtbGlob {
+ return (*TcHtbGlob)(unsafe.Pointer(&b[0:SizeofTcHtbGlob][0]))
+}
+
+func (x *TcHtbGlob) Serialize() []byte {
+ return (*(*[SizeofTcHtbGlob]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TCA_U32_UNSPEC = iota
+ TCA_U32_CLASSID
+ TCA_U32_HASH
+ TCA_U32_LINK
+ TCA_U32_DIVISOR
+ TCA_U32_SEL
+ TCA_U32_POLICE
+ TCA_U32_ACT
+ TCA_U32_INDEV
+ TCA_U32_PCNT
+ TCA_U32_MARK
+ TCA_U32_MAX = TCA_U32_MARK
+)
+
+// struct tc_u32_key {
+// __be32 mask;
+// __be32 val;
+// int off;
+// int offmask;
+// };
+
+type TcU32Key struct {
+ Mask uint32 // big endian
+ Val uint32 // big endian
+ Off int32
+ OffMask int32
+}
+
+func (msg *TcU32Key) Len() int {
+ return SizeofTcU32Key
+}
+
+func DeserializeTcU32Key(b []byte) *TcU32Key {
+ return (*TcU32Key)(unsafe.Pointer(&b[0:SizeofTcU32Key][0]))
+}
+
+func (x *TcU32Key) Serialize() []byte {
+ return (*(*[SizeofTcU32Key]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_u32_sel {
+// unsigned char flags;
+// unsigned char offshift;
+// unsigned char nkeys;
+//
+// __be16 offmask;
+// __u16 off;
+// short offoff;
+//
+// short hoff;
+// __be32 hmask;
+// struct tc_u32_key keys[0];
+// };
+
+const (
+ TC_U32_TERMINAL = 1 << iota
+ TC_U32_OFFSET = 1 << iota
+ TC_U32_VAROFFSET = 1 << iota
+ TC_U32_EAT = 1 << iota
+)
+
+type TcU32Sel struct {
+ Flags uint8
+ Offshift uint8
+ Nkeys uint8
+ Pad uint8
+ Offmask uint16 // big endian
+ Off uint16
+ Offoff int16
+ Hoff int16
+ Hmask uint32 // big endian
+ Keys []TcU32Key
+}
+
+func (msg *TcU32Sel) Len() int {
+ return SizeofTcU32Sel + int(msg.Nkeys)*SizeofTcU32Key
+}
+
+func DeserializeTcU32Sel(b []byte) *TcU32Sel {
+ x := &TcU32Sel{}
+ copy((*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:], b)
+ next := SizeofTcU32Sel
+ var i uint8
+ for i = 0; i < x.Nkeys; i++ {
+ x.Keys = append(x.Keys, *DeserializeTcU32Key(b[next:]))
+ next += SizeofTcU32Key
+ }
+ return x
+}
+
+func (x *TcU32Sel) Serialize() []byte {
+ // This can't be done with a simple unsafe cast because the
+ // variable-length Keys must be appended after the fixed-size header.
+ buf := make([]byte, x.Len())
+ copy(buf, (*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:])
+ next := SizeofTcU32Sel
+ for _, key := range x.Keys {
+ keyBuf := key.Serialize()
+ copy(buf[next:], keyBuf)
+ next += SizeofTcU32Key
+ }
+ return buf
+}
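+
+// A minimal round-trip sketch (not part of the upstream sources; assumes
+// the Sizeof constants defined in this package):
+//
+//	sel := &TcU32Sel{Nkeys: 1, Keys: []TcU32Key{{Mask: 0xffff0000, Val: 0x0a000000}}}
+//	out := DeserializeTcU32Sel(sel.Serialize())
+//	// out.Nkeys == 1 and out.Keys[0].Val == sel.Keys[0].Val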
+
+type TcGen struct {
+ Index uint32
+ Capab uint32
+ Action int32
+ Refcnt int32
+ Bindcnt int32
+}
+
+func (msg *TcGen) Len() int {
+ return SizeofTcGen
+}
+
+func DeserializeTcGen(b []byte) *TcGen {
+ return (*TcGen)(unsafe.Pointer(&b[0:SizeofTcGen][0]))
+}
+
+func (x *TcGen) Serialize() []byte {
+ return (*(*[SizeofTcGen]byte)(unsafe.Pointer(x)))[:]
+}
+
+// #define tc_gen \
+// __u32 index; \
+// __u32 capab; \
+// int action; \
+// int refcnt; \
+// int bindcnt
+
+const (
+ TCA_ACT_GACT = 5
+)
+
+const (
+ TCA_GACT_UNSPEC = iota
+ TCA_GACT_TM
+ TCA_GACT_PARMS
+ TCA_GACT_PROB
+ TCA_GACT_MAX = TCA_GACT_PROB
+)
+
+type TcGact TcGen
+
+const (
+ TCA_ACT_BPF = 13
+)
+
+const (
+ TCA_ACT_BPF_UNSPEC = iota
+ TCA_ACT_BPF_TM
+ TCA_ACT_BPF_PARMS
+ TCA_ACT_BPF_OPS_LEN
+ TCA_ACT_BPF_OPS
+ TCA_ACT_BPF_FD
+ TCA_ACT_BPF_NAME
+ TCA_ACT_BPF_MAX = TCA_ACT_BPF_NAME
+)
+
+const (
+ TCA_BPF_FLAG_ACT_DIRECT uint32 = 1 << iota
+)
+
+const (
+ TCA_BPF_UNSPEC = iota
+ TCA_BPF_ACT
+ TCA_BPF_POLICE
+ TCA_BPF_CLASSID
+ TCA_BPF_OPS_LEN
+ TCA_BPF_OPS
+ TCA_BPF_FD
+ TCA_BPF_NAME
+ TCA_BPF_FLAGS
+ TCA_BPF_MAX = TCA_BPF_FLAGS
+)
+
+type TcBpf TcGen
+
+const (
+ TCA_ACT_MIRRED = 8
+)
+
+const (
+ TCA_MIRRED_UNSPEC = iota
+ TCA_MIRRED_TM
+ TCA_MIRRED_PARMS
+ TCA_MIRRED_MAX = TCA_MIRRED_PARMS
+)
+
+// struct tc_mirred {
+// tc_gen;
+// int eaction; /* one of IN/EGRESS_MIRROR/REDIR */
+// __u32 ifindex; /* ifindex of egress port */
+// };
+
+type TcMirred struct {
+ TcGen
+ Eaction int32
+ Ifindex uint32
+}
+
+func (msg *TcMirred) Len() int {
+ return SizeofTcMirred
+}
+
+func DeserializeTcMirred(b []byte) *TcMirred {
+ return (*TcMirred)(unsafe.Pointer(&b[0:SizeofTcMirred][0]))
+}
+
+func (x *TcMirred) Serialize() []byte {
+ return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_police {
+// __u32 index;
+// int action;
+// __u32 limit;
+// __u32 burst;
+// __u32 mtu;
+// struct tc_ratespec rate;
+// struct tc_ratespec peakrate;
+// int refcnt;
+// int bindcnt;
+// __u32 capab;
+// };
+
+type TcPolice struct {
+ Index uint32
+ Action int32
+ Limit uint32
+ Burst uint32
+ Mtu uint32
+ Rate TcRateSpec
+ PeakRate TcRateSpec
+ Refcnt int32
+ Bindcnt int32
+ Capab uint32
+}
+
+func (msg *TcPolice) Len() int {
+ return SizeofTcPolice
+}
+
+func DeserializeTcPolice(b []byte) *TcPolice {
+ return (*TcPolice)(unsafe.Pointer(&b[0:SizeofTcPolice][0]))
+}
+
+func (x *TcPolice) Serialize() []byte {
+ return (*(*[SizeofTcPolice]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TCA_FW_UNSPEC = iota
+ TCA_FW_CLASSID
+ TCA_FW_POLICE
+ TCA_FW_INDEV
+ TCA_FW_ACT
+ TCA_FW_MASK
+ TCA_FW_MAX = TCA_FW_MASK
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
new file mode 100644
index 000000000..09a2ffa10
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
@@ -0,0 +1,296 @@
+package nl
+
+import (
+ "bytes"
+ "net"
+ "unsafe"
+)
+
+// Infinity for packet and byte counts
+const (
+ XFRM_INF = ^uint64(0)
+)
+
+type XfrmMsgType uint8
+
+type XfrmMsg interface {
+ Type() XfrmMsgType
+}
+
+// Message Types
+const (
+ XFRM_MSG_BASE XfrmMsgType = 0x10
+ XFRM_MSG_NEWSA = 0x10
+ XFRM_MSG_DELSA = 0x11
+ XFRM_MSG_GETSA = 0x12
+ XFRM_MSG_NEWPOLICY = 0x13
+ XFRM_MSG_DELPOLICY = 0x14
+ XFRM_MSG_GETPOLICY = 0x15
+ XFRM_MSG_ALLOCSPI = 0x16
+ XFRM_MSG_ACQUIRE = 0x17
+ XFRM_MSG_EXPIRE = 0x18
+ XFRM_MSG_UPDPOLICY = 0x19
+ XFRM_MSG_UPDSA = 0x1a
+ XFRM_MSG_POLEXPIRE = 0x1b
+ XFRM_MSG_FLUSHSA = 0x1c
+ XFRM_MSG_FLUSHPOLICY = 0x1d
+ XFRM_MSG_NEWAE = 0x1e
+ XFRM_MSG_GETAE = 0x1f
+ XFRM_MSG_REPORT = 0x20
+ XFRM_MSG_MIGRATE = 0x21
+ XFRM_MSG_NEWSADINFO = 0x22
+ XFRM_MSG_GETSADINFO = 0x23
+ XFRM_MSG_NEWSPDINFO = 0x24
+ XFRM_MSG_GETSPDINFO = 0x25
+ XFRM_MSG_MAPPING = 0x26
+ XFRM_MSG_MAX = 0x26
+ XFRM_NR_MSGTYPES = 0x17
+)
+
+// Attribute types
+const (
+ /* Netlink message attributes. */
+ XFRMA_UNSPEC = 0x00
+ XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */
+ XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */
+ XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */
+ XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */
+ XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */
+ XFRMA_SA = 0x06 /* struct xfrm_usersa_info */
+ XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */
+ XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */
+ XFRMA_LTIME_VAL = 0x09
+ XFRMA_REPLAY_VAL = 0x0a
+ XFRMA_REPLAY_THRESH = 0x0b
+ XFRMA_ETIMER_THRESH = 0x0c
+ XFRMA_SRCADDR = 0x0d /* xfrm_address_t */
+ XFRMA_COADDR = 0x0e /* xfrm_address_t */
+ XFRMA_LASTUSED = 0x0f /* unsigned long */
+ XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */
+ XFRMA_MIGRATE = 0x11
+ XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */
+ XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */
+ XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */
+ XFRMA_MARK = 0x15 /* struct xfrm_mark */
+ XFRMA_TFCPAD = 0x16 /* __u32 */
+ XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */
+ XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */
+ XFRMA_MAX = 0x18
+)
+
+const (
+ SizeofXfrmAddress = 0x10
+ SizeofXfrmSelector = 0x38
+ SizeofXfrmLifetimeCfg = 0x40
+ SizeofXfrmLifetimeCur = 0x20
+ SizeofXfrmId = 0x18
+ SizeofXfrmMark = 0x08
+)
+
+// Netlink groups
+const (
+ XFRMNLGRP_NONE = 0x0
+ XFRMNLGRP_ACQUIRE = 0x1
+ XFRMNLGRP_EXPIRE = 0x2
+ XFRMNLGRP_SA = 0x3
+ XFRMNLGRP_POLICY = 0x4
+ XFRMNLGRP_AEVENTS = 0x5
+ XFRMNLGRP_REPORT = 0x6
+ XFRMNLGRP_MIGRATE = 0x7
+ XFRMNLGRP_MAPPING = 0x8
+ __XFRMNLGRP_MAX = 0x9
+)
+
+// typedef union {
+// __be32 a4;
+// __be32 a6[4];
+// } xfrm_address_t;
+
+type XfrmAddress [SizeofXfrmAddress]byte
+
+func (x *XfrmAddress) ToIP() net.IP {
+ var empty = [12]byte{}
+ ip := make(net.IP, net.IPv6len)
+ if bytes.Equal(x[4:16], empty[:]) {
+ ip[10] = 0xff
+ ip[11] = 0xff
+ copy(ip[12:16], x[0:4])
+ } else {
+ copy(ip[:], x[:])
+ }
+ return ip
+}
+
+func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet {
+ ip := x.ToIP()
+ if GetIPFamily(ip) == FAMILY_V4 {
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)}
+ }
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 128)}
+}
+
+func (x *XfrmAddress) FromIP(ip net.IP) {
+ var empty = [16]byte{}
+ if len(ip) < net.IPv4len {
+ copy(x[4:16], empty[:])
+ } else if GetIPFamily(ip) == FAMILY_V4 {
+ copy(x[0:4], ip.To4()[0:4])
+ copy(x[4:16], empty[:12])
+ } else {
+ copy(x[0:16], ip.To16()[0:16])
+ }
+}
+
+func DeserializeXfrmAddress(b []byte) *XfrmAddress {
+ return (*XfrmAddress)(unsafe.Pointer(&b[0:SizeofXfrmAddress][0]))
+}
+
+func (x *XfrmAddress) Serialize() []byte {
+ return (*(*[SizeofXfrmAddress]byte)(unsafe.Pointer(x)))[:]
+}
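+
+// A minimal sketch of the IPv4 round trip (illustrative only): FromIP
+// stores the four address bytes at the front of the union, and ToIP
+// rebuilds an IPv4-mapped net.IP from them:
+//
+//	var a XfrmAddress
+//	a.FromIP(net.ParseIP("10.0.0.1"))
+//	ip := a.ToIP() // ip.String() == "10.0.0.1"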
+
+// struct xfrm_selector {
+// xfrm_address_t daddr;
+// xfrm_address_t saddr;
+// __be16 dport;
+// __be16 dport_mask;
+// __be16 sport;
+// __be16 sport_mask;
+// __u16 family;
+// __u8 prefixlen_d;
+// __u8 prefixlen_s;
+// __u8 proto;
+// int ifindex;
+// __kernel_uid32_t user;
+// };
+
+type XfrmSelector struct {
+ Daddr XfrmAddress
+ Saddr XfrmAddress
+ Dport uint16 // big endian
+ DportMask uint16 // big endian
+ Sport uint16 // big endian
+ SportMask uint16 // big endian
+ Family uint16
+ PrefixlenD uint8
+ PrefixlenS uint8
+ Proto uint8
+ Pad [3]byte
+ Ifindex int32
+ User uint32
+}
+
+func (msg *XfrmSelector) Len() int {
+ return SizeofXfrmSelector
+}
+
+func DeserializeXfrmSelector(b []byte) *XfrmSelector {
+ return (*XfrmSelector)(unsafe.Pointer(&b[0:SizeofXfrmSelector][0]))
+}
+
+func (msg *XfrmSelector) Serialize() []byte {
+ return (*(*[SizeofXfrmSelector]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cfg {
+// __u64 soft_byte_limit;
+// __u64 hard_byte_limit;
+// __u64 soft_packet_limit;
+// __u64 hard_packet_limit;
+// __u64 soft_add_expires_seconds;
+// __u64 hard_add_expires_seconds;
+// __u64 soft_use_expires_seconds;
+// __u64 hard_use_expires_seconds;
+// };
+//
+
+type XfrmLifetimeCfg struct {
+ SoftByteLimit uint64
+ HardByteLimit uint64
+ SoftPacketLimit uint64
+ HardPacketLimit uint64
+ SoftAddExpiresSeconds uint64
+ HardAddExpiresSeconds uint64
+ SoftUseExpiresSeconds uint64
+ HardUseExpiresSeconds uint64
+}
+
+func (msg *XfrmLifetimeCfg) Len() int {
+ return SizeofXfrmLifetimeCfg
+}
+
+func DeserializeXfrmLifetimeCfg(b []byte) *XfrmLifetimeCfg {
+ return (*XfrmLifetimeCfg)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCfg][0]))
+}
+
+func (msg *XfrmLifetimeCfg) Serialize() []byte {
+ return (*(*[SizeofXfrmLifetimeCfg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cur {
+// __u64 bytes;
+// __u64 packets;
+// __u64 add_time;
+// __u64 use_time;
+// };
+
+type XfrmLifetimeCur struct {
+ Bytes uint64
+ Packets uint64
+ AddTime uint64
+ UseTime uint64
+}
+
+func (msg *XfrmLifetimeCur) Len() int {
+ return SizeofXfrmLifetimeCur
+}
+
+func DeserializeXfrmLifetimeCur(b []byte) *XfrmLifetimeCur {
+ return (*XfrmLifetimeCur)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCur][0]))
+}
+
+func (msg *XfrmLifetimeCur) Serialize() []byte {
+ return (*(*[SizeofXfrmLifetimeCur]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_id {
+// xfrm_address_t daddr;
+// __be32 spi;
+// __u8 proto;
+// };
+
+type XfrmId struct {
+ Daddr XfrmAddress
+ Spi uint32 // big endian
+ Proto uint8
+ Pad [3]byte
+}
+
+func (msg *XfrmId) Len() int {
+ return SizeofXfrmId
+}
+
+func DeserializeXfrmId(b []byte) *XfrmId {
+ return (*XfrmId)(unsafe.Pointer(&b[0:SizeofXfrmId][0]))
+}
+
+func (msg *XfrmId) Serialize() []byte {
+ return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+type XfrmMark struct {
+ Value uint32
+ Mask uint32
+}
+
+func (msg *XfrmMark) Len() int {
+ return SizeofXfrmMark
+}
+
+func DeserializeXfrmMark(b []byte) *XfrmMark {
+ return (*XfrmMark)(unsafe.Pointer(&b[0:SizeofXfrmMark][0]))
+}
+
+func (msg *XfrmMark) Serialize() []byte {
+ return (*(*[SizeofXfrmMark]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go
new file mode 100644
index 000000000..715df4cc5
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go
@@ -0,0 +1,32 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const (
+ SizeofXfrmUserExpire = 0xe8
+)
+
+// struct xfrm_user_expire {
+// struct xfrm_usersa_info state;
+// __u8 hard;
+// };
+
+type XfrmUserExpire struct {
+ XfrmUsersaInfo XfrmUsersaInfo
+ Hard uint8
+ Pad [7]byte
+}
+
+func (msg *XfrmUserExpire) Len() int {
+ return SizeofXfrmUserExpire
+}
+
+func DeserializeXfrmUserExpire(b []byte) *XfrmUserExpire {
+ return (*XfrmUserExpire)(unsafe.Pointer(&b[0:SizeofXfrmUserExpire][0]))
+}
+
+func (msg *XfrmUserExpire) Serialize() []byte {
+ return (*(*[SizeofXfrmUserExpire]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
new file mode 100644
index 000000000..66f7e03d2
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
@@ -0,0 +1,119 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const (
+ SizeofXfrmUserpolicyId = 0x40
+ SizeofXfrmUserpolicyInfo = 0xa8
+ SizeofXfrmUserTmpl = 0x40
+)
+
+// struct xfrm_userpolicy_id {
+// struct xfrm_selector sel;
+// __u32 index;
+// __u8 dir;
+// };
+//
+
+type XfrmUserpolicyId struct {
+ Sel XfrmSelector
+ Index uint32
+ Dir uint8
+ Pad [3]byte
+}
+
+func (msg *XfrmUserpolicyId) Len() int {
+ return SizeofXfrmUserpolicyId
+}
+
+func DeserializeXfrmUserpolicyId(b []byte) *XfrmUserpolicyId {
+ return (*XfrmUserpolicyId)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyId][0]))
+}
+
+func (msg *XfrmUserpolicyId) Serialize() []byte {
+ return (*(*[SizeofXfrmUserpolicyId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_userpolicy_info {
+// struct xfrm_selector sel;
+// struct xfrm_lifetime_cfg lft;
+// struct xfrm_lifetime_cur curlft;
+// __u32 priority;
+// __u32 index;
+// __u8 dir;
+// __u8 action;
+// #define XFRM_POLICY_ALLOW 0
+// #define XFRM_POLICY_BLOCK 1
+// __u8 flags;
+// #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */
+// /* Automatically expand selector to include matching ICMP payloads. */
+// #define XFRM_POLICY_ICMP 2
+// __u8 share;
+// };
+
+type XfrmUserpolicyInfo struct {
+ Sel XfrmSelector
+ Lft XfrmLifetimeCfg
+ Curlft XfrmLifetimeCur
+ Priority uint32
+ Index uint32
+ Dir uint8
+ Action uint8
+ Flags uint8
+ Share uint8
+ Pad [4]byte
+}
+
+func (msg *XfrmUserpolicyInfo) Len() int {
+ return SizeofXfrmUserpolicyInfo
+}
+
+func DeserializeXfrmUserpolicyInfo(b []byte) *XfrmUserpolicyInfo {
+ return (*XfrmUserpolicyInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyInfo][0]))
+}
+
+func (msg *XfrmUserpolicyInfo) Serialize() []byte {
+ return (*(*[SizeofXfrmUserpolicyInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_user_tmpl {
+// struct xfrm_id id;
+// __u16 family;
+// xfrm_address_t saddr;
+// __u32 reqid;
+// __u8 mode;
+// __u8 share;
+// __u8 optional;
+// __u32 aalgos;
+// __u32 ealgos;
+// __u32 calgos;
+// }
+
+type XfrmUserTmpl struct {
+ XfrmId XfrmId
+ Family uint16
+ Pad1 [2]byte
+ Saddr XfrmAddress
+ Reqid uint32
+ Mode uint8
+ Share uint8
+ Optional uint8
+ Pad2 byte
+ Aalgos uint32
+ Ealgos uint32
+ Calgos uint32
+}
+
+func (msg *XfrmUserTmpl) Len() int {
+ return SizeofXfrmUserTmpl
+}
+
+func DeserializeXfrmUserTmpl(b []byte) *XfrmUserTmpl {
+ return (*XfrmUserTmpl)(unsafe.Pointer(&b[0:SizeofXfrmUserTmpl][0]))
+}
+
+func (msg *XfrmUserTmpl) Serialize() []byte {
+ return (*(*[SizeofXfrmUserTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
new file mode 100644
index 000000000..b6290fd54
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
@@ -0,0 +1,334 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const (
+ SizeofXfrmUsersaId = 0x18
+ SizeofXfrmStats = 0x0c
+ SizeofXfrmUsersaInfo = 0xe0
+ SizeofXfrmUserSpiInfo = 0xe8
+ SizeofXfrmAlgo = 0x44
+ SizeofXfrmAlgoAuth = 0x48
+ SizeofXfrmAlgoAEAD = 0x48
+ SizeofXfrmEncapTmpl = 0x18
+ SizeofXfrmUsersaFlush = 0x8
+ SizeofXfrmReplayStateEsn = 0x18
+)
+
+const (
+ XFRM_STATE_NOECN = 1
+ XFRM_STATE_DECAP_DSCP = 2
+ XFRM_STATE_NOPMTUDISC = 4
+ XFRM_STATE_WILDRECV = 8
+ XFRM_STATE_ICMP = 16
+ XFRM_STATE_AF_UNSPEC = 32
+ XFRM_STATE_ALIGN4 = 64
+ XFRM_STATE_ESN = 128
+)
+
+// struct xfrm_usersa_id {
+// xfrm_address_t daddr;
+// __be32 spi;
+// __u16 family;
+// __u8 proto;
+// };
+
+type XfrmUsersaId struct {
+ Daddr XfrmAddress
+ Spi uint32 // big endian
+ Family uint16
+ Proto uint8
+ Pad byte
+}
+
+func (msg *XfrmUsersaId) Len() int {
+ return SizeofXfrmUsersaId
+}
+
+func DeserializeXfrmUsersaId(b []byte) *XfrmUsersaId {
+ return (*XfrmUsersaId)(unsafe.Pointer(&b[0:SizeofXfrmUsersaId][0]))
+}
+
+func (msg *XfrmUsersaId) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_stats {
+// __u32 replay_window;
+// __u32 replay;
+// __u32 integrity_failed;
+// };
+
+type XfrmStats struct {
+ ReplayWindow uint32
+ Replay uint32
+ IntegrityFailed uint32
+}
+
+func (msg *XfrmStats) Len() int {
+ return SizeofXfrmStats
+}
+
+func DeserializeXfrmStats(b []byte) *XfrmStats {
+ return (*XfrmStats)(unsafe.Pointer(&b[0:SizeofXfrmStats][0]))
+}
+
+func (msg *XfrmStats) Serialize() []byte {
+ return (*(*[SizeofXfrmStats]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_usersa_info {
+// struct xfrm_selector sel;
+// struct xfrm_id id;
+// xfrm_address_t saddr;
+// struct xfrm_lifetime_cfg lft;
+// struct xfrm_lifetime_cur curlft;
+// struct xfrm_stats stats;
+// __u32 seq;
+// __u32 reqid;
+// __u16 family;
+// __u8 mode; /* XFRM_MODE_xxx */
+// __u8 replay_window;
+// __u8 flags;
+// #define XFRM_STATE_NOECN 1
+// #define XFRM_STATE_DECAP_DSCP 2
+// #define XFRM_STATE_NOPMTUDISC 4
+// #define XFRM_STATE_WILDRECV 8
+// #define XFRM_STATE_ICMP 16
+// #define XFRM_STATE_AF_UNSPEC 32
+// #define XFRM_STATE_ALIGN4 64
+// #define XFRM_STATE_ESN 128
+// };
+//
+// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1
+//
+
+type XfrmUsersaInfo struct {
+ Sel XfrmSelector
+ Id XfrmId
+ Saddr XfrmAddress
+ Lft XfrmLifetimeCfg
+ Curlft XfrmLifetimeCur
+ Stats XfrmStats
+ Seq uint32
+ Reqid uint32
+ Family uint16
+ Mode uint8
+ ReplayWindow uint8
+ Flags uint8
+ Pad [7]byte
+}
+
+func (msg *XfrmUsersaInfo) Len() int {
+ return SizeofXfrmUsersaInfo
+}
+
+func DeserializeXfrmUsersaInfo(b []byte) *XfrmUsersaInfo {
+ return (*XfrmUsersaInfo)(unsafe.Pointer(&b[0:SizeofXfrmUsersaInfo][0]))
+}
+
+func (msg *XfrmUsersaInfo) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_userspi_info {
+// struct xfrm_usersa_info info;
+// __u32 min;
+// __u32 max;
+// };
+
+type XfrmUserSpiInfo struct {
+ XfrmUsersaInfo XfrmUsersaInfo
+ Min uint32
+ Max uint32
+}
+
+func (msg *XfrmUserSpiInfo) Len() int {
+ return SizeofXfrmUserSpiInfo
+}
+
+func DeserializeXfrmUserSpiInfo(b []byte) *XfrmUserSpiInfo {
+ return (*XfrmUserSpiInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserSpiInfo][0]))
+}
+
+func (msg *XfrmUserSpiInfo) Serialize() []byte {
+ return (*(*[SizeofXfrmUserSpiInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_algo {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// char alg_key[0];
+// };
+
+type XfrmAlgo struct {
+ AlgName [64]byte
+ AlgKeyLen uint32
+ AlgKey []byte
+}
+
+func (msg *XfrmAlgo) Len() int {
+ return SizeofXfrmAlgo + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgo(b []byte) *XfrmAlgo {
+ ret := XfrmAlgo{}
+ copy(ret.AlgName[:], b[0:64])
+ ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+ ret.AlgKey = b[68:ret.Len()]
+ return &ret
+}
+
+func (msg *XfrmAlgo) Serialize() []byte {
+ b := make([]byte, msg.Len())
+ copy(b[0:64], msg.AlgName[:])
+ copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+ copy(b[68:msg.Len()], msg.AlgKey[:])
+ return b
+}
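+
+// A minimal round-trip sketch (AlgKeyLen is in bits, so a 16-byte key is
+// 128; the algorithm name is only an example):
+//
+//	a := &XfrmAlgo{AlgKeyLen: 128, AlgKey: make([]byte, 16)}
+//	copy(a.AlgName[:], "cbc(aes)")
+//	out := DeserializeXfrmAlgo(a.Serialize()) // out.AlgKeyLen == 128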
+
+// struct xfrm_algo_auth {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// unsigned int alg_trunc_len; /* in bits */
+// char alg_key[0];
+// };
+
+type XfrmAlgoAuth struct {
+ AlgName [64]byte
+ AlgKeyLen uint32
+ AlgTruncLen uint32
+ AlgKey []byte
+}
+
+func (msg *XfrmAlgoAuth) Len() int {
+ return SizeofXfrmAlgoAuth + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgoAuth(b []byte) *XfrmAlgoAuth {
+ ret := XfrmAlgoAuth{}
+ copy(ret.AlgName[:], b[0:64])
+ ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+ ret.AlgTruncLen = *(*uint32)(unsafe.Pointer(&b[68]))
+ ret.AlgKey = b[72:ret.Len()]
+ return &ret
+}
+
+func (msg *XfrmAlgoAuth) Serialize() []byte {
+ b := make([]byte, msg.Len())
+ copy(b[0:64], msg.AlgName[:])
+ copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+ copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgTruncLen)))[:])
+ copy(b[72:msg.Len()], msg.AlgKey[:])
+ return b
+}
+
+// struct xfrm_algo_aead {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// unsigned int alg_icv_len; /* in bits */
+// char alg_key[0];
+// }
+
+type XfrmAlgoAEAD struct {
+ AlgName [64]byte
+ AlgKeyLen uint32
+ AlgICVLen uint32
+ AlgKey []byte
+}
+
+func (msg *XfrmAlgoAEAD) Len() int {
+ return SizeofXfrmAlgoAEAD + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgoAEAD(b []byte) *XfrmAlgoAEAD {
+ ret := XfrmAlgoAEAD{}
+ copy(ret.AlgName[:], b[0:64])
+ ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+ ret.AlgICVLen = *(*uint32)(unsafe.Pointer(&b[68]))
+ ret.AlgKey = b[72:ret.Len()]
+ return &ret
+}
+
+func (msg *XfrmAlgoAEAD) Serialize() []byte {
+ b := make([]byte, msg.Len())
+ copy(b[0:64], msg.AlgName[:])
+ copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+ copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgICVLen)))[:])
+ copy(b[72:msg.Len()], msg.AlgKey[:])
+ return b
+}
+
+// struct xfrm_encap_tmpl {
+// __u16 encap_type;
+// __be16 encap_sport;
+// __be16 encap_dport;
+// xfrm_address_t encap_oa;
+// };
+
+type XfrmEncapTmpl struct {
+ EncapType uint16
+ EncapSport uint16 // big endian
+ EncapDport uint16 // big endian
+ Pad [2]byte
+ EncapOa XfrmAddress
+}
+
+func (msg *XfrmEncapTmpl) Len() int {
+ return SizeofXfrmEncapTmpl
+}
+
+func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl {
+ return (*XfrmEncapTmpl)(unsafe.Pointer(&b[0:SizeofXfrmEncapTmpl][0]))
+}
+
+func (msg *XfrmEncapTmpl) Serialize() []byte {
+ return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_usersa_flush {
+// __u8 proto;
+// };
+
+type XfrmUsersaFlush struct {
+ Proto uint8
+}
+
+func (msg *XfrmUsersaFlush) Len() int {
+ return SizeofXfrmUsersaFlush
+}
+
+func DeserializeXfrmUsersaFlush(b []byte) *XfrmUsersaFlush {
+ return (*XfrmUsersaFlush)(unsafe.Pointer(&b[0:SizeofXfrmUsersaFlush][0]))
+}
+
+func (msg *XfrmUsersaFlush) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaFlush]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_replay_state_esn {
+// unsigned int bmp_len;
+// __u32 oseq;
+// __u32 seq;
+// __u32 oseq_hi;
+// __u32 seq_hi;
+// __u32 replay_window;
+// __u32 bmp[0];
+// };
+
+type XfrmReplayStateEsn struct {
+ BmpLen uint32
+ OSeq uint32
+ Seq uint32
+ OSeqHi uint32
+ SeqHi uint32
+ ReplayWindow uint32
+ Bmp []uint32
+}
+
+func (msg *XfrmReplayStateEsn) Serialize() []byte {
+ // We deliberately do not pass Bmp, as it gets set by the kernel.
+ return (*(*[SizeofXfrmReplayStateEsn]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/github.com/vishvananda/netlink/order.go b/vendor/github.com/vishvananda/netlink/order.go
new file mode 100644
index 000000000..e28e153a1
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/order.go
@@ -0,0 +1,32 @@
+package netlink
+
+import (
+ "encoding/binary"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+var (
+ native = nl.NativeEndian()
+ networkOrder = binary.BigEndian
+)
+
+func htonl(val uint32) []byte {
+ bytes := make([]byte, 4)
+ binary.BigEndian.PutUint32(bytes, val)
+ return bytes
+}
+
+func htons(val uint16) []byte {
+ bytes := make([]byte, 2)
+ binary.BigEndian.PutUint16(bytes, val)
+ return bytes
+}
+
+func ntohl(buf []byte) uint32 {
+ return binary.BigEndian.Uint32(buf)
+}
+
+func ntohs(buf []byte) uint16 {
+ return binary.BigEndian.Uint16(buf)
+}
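+
+// For example, htonl(0x0a000001) yields []byte{0x0a, 0x00, 0x00, 0x01},
+// and ntohl on that slice restores 0x0a000001.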
diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go
new file mode 100644
index 000000000..0087c4438
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/protinfo.go
@@ -0,0 +1,58 @@
+package netlink
+
+import (
+ "strings"
+)
+
+// Protinfo represents bridge flags from netlink.
+type Protinfo struct {
+ Hairpin bool
+ Guard bool
+ FastLeave bool
+ RootBlock bool
+ Learning bool
+ Flood bool
+ ProxyArp bool
+ ProxyArpWiFi bool
+}
+
+// String returns a space-separated list of the enabled flags.
+func (prot *Protinfo) String() string {
+ var boolStrings []string
+ if prot.Hairpin {
+ boolStrings = append(boolStrings, "Hairpin")
+ }
+ if prot.Guard {
+ boolStrings = append(boolStrings, "Guard")
+ }
+ if prot.FastLeave {
+ boolStrings = append(boolStrings, "FastLeave")
+ }
+ if prot.RootBlock {
+ boolStrings = append(boolStrings, "RootBlock")
+ }
+ if prot.Learning {
+ boolStrings = append(boolStrings, "Learning")
+ }
+ if prot.Flood {
+ boolStrings = append(boolStrings, "Flood")
+ }
+ if prot.ProxyArp {
+ boolStrings = append(boolStrings, "ProxyArp")
+ }
+ if prot.ProxyArpWiFi {
+ boolStrings = append(boolStrings, "ProxyArpWiFi")
+ }
+ return strings.Join(boolStrings, " ")
+}
+
+func boolToByte(x bool) []byte {
+ if x {
+ return []byte{1}
+ }
+ return []byte{0}
+}
+
+func byteToBool(x byte) bool {
+ return uint8(x) != 0
+}
diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
new file mode 100644
index 000000000..10dd0d533
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
@@ -0,0 +1,74 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func LinkGetProtinfo(link Link) (Protinfo, error) {
+ return pkgHandle.LinkGetProtinfo(link)
+}
+
+func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ var pi Protinfo
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ req.AddData(msg)
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return pi, err
+ }
+
+ for _, m := range msgs {
+ ans := nl.DeserializeIfInfomsg(m)
+ if int(ans.Index) != base.Index {
+ continue
+ }
+ attrs, err := nl.ParseRouteAttr(m[ans.Len():])
+ if err != nil {
+ return pi, err
+ }
+ for _, attr := range attrs {
+ if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED {
+ continue
+ }
+ infos, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return pi, err
+ }
+ pi = *parseProtinfo(infos)
+
+ return pi, nil
+ }
+ }
+ return pi, fmt.Errorf("device with index %d not found", base.Index)
+}
+
+func parseProtinfo(infos []syscall.NetlinkRouteAttr) *Protinfo {
+ var pi Protinfo
+ for _, info := range infos {
+ switch info.Attr.Type {
+ case nl.IFLA_BRPORT_MODE:
+ pi.Hairpin = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_GUARD:
+ pi.Guard = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_FAST_LEAVE:
+ pi.FastLeave = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_PROTECT:
+ pi.RootBlock = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_LEARNING:
+ pi.Learning = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_UNICAST_FLOOD:
+ pi.Flood = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_PROXYARP:
+ pi.ProxyArp = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_PROXYARP_WIFI:
+ pi.ProxyArpWiFi = byteToBool(info.Value[0])
+ }
+ }
+ return &pi
+}
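+
+// A minimal usage sketch, assuming a bridge port named "veth0" exists
+// (LinkByName is defined elsewhere in this package):
+//
+//	link, err := LinkByName("veth0")
+//	if err == nil {
+//		if pi, err := LinkGetProtinfo(link); err == nil && pi.Hairpin {
+//			// hairpin mode is enabled on this port
+//		}
+//	}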
diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go
new file mode 100644
index 000000000..0ca86ebe8
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/qdisc.go
@@ -0,0 +1,232 @@
+package netlink
+
+import (
+ "fmt"
+ "math"
+)
+
+const (
+ HANDLE_NONE = 0
+ HANDLE_INGRESS = 0xFFFFFFF1
+ HANDLE_CLSACT = HANDLE_INGRESS
+ HANDLE_ROOT = 0xFFFFFFFF
+ PRIORITY_MAP_LEN = 16
+)
+const (
+ HANDLE_MIN_INGRESS = 0xFFFFFFF2
+ HANDLE_MIN_EGRESS = 0xFFFFFFF3
+)
+
+type Qdisc interface {
+ Attrs() *QdiscAttrs
+ Type() string
+}
+
+// QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link,
+// has a handle, a parent and a refcnt. The root qdisc of a device should
+// have parent == HANDLE_ROOT.
+type QdiscAttrs struct {
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Refcnt uint32 // read only
+}
+
+func (q QdiscAttrs) String() string {
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt)
+}
+
+func MakeHandle(major, minor uint16) uint32 {
+ return (uint32(major) << 16) | uint32(minor)
+}
+
+func MajorMinor(handle uint32) (uint16, uint16) {
+ return uint16((handle & 0xFFFF0000) >> 16), uint16(handle & 0x0000FFFF)
+}
+
+func HandleStr(handle uint32) string {
+ switch handle {
+ case HANDLE_NONE:
+ return "none"
+ case HANDLE_INGRESS:
+ return "ingress"
+ case HANDLE_ROOT:
+ return "root"
+ default:
+ major, minor := MajorMinor(handle)
+ return fmt.Sprintf("%x:%x", major, minor)
+ }
+}
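+
+// For example, MakeHandle(1, 10) yields 0x0001000a, MajorMinor(0x0001000a)
+// returns (1, 10), and HandleStr formats it as "1:a".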
+
+func Percentage2u32(percentage float32) uint32 {
+ // FIXME this is most likely not the best way to convert from % to uint32
+ if percentage == 100 {
+ return math.MaxUint32
+ }
+ return uint32(math.MaxUint32 * (percentage / 100))
+}
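+
+// For example, Percentage2u32(100) returns math.MaxUint32 and
+// Percentage2u32(25) returns roughly a quarter of math.MaxUint32.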
+
+// PfifoFast is the default qdisc created by the kernel if one has not
+// been defined for the interface.
+type PfifoFast struct {
+ QdiscAttrs
+ Bands uint8
+ PriorityMap [PRIORITY_MAP_LEN]uint8
+}
+
+func (qdisc *PfifoFast) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *PfifoFast) Type() string {
+ return "pfifo_fast"
+}
+
+// Prio is a basic qdisc that works just like PfifoFast
+type Prio struct {
+ QdiscAttrs
+ Bands uint8
+ PriorityMap [PRIORITY_MAP_LEN]uint8
+}
+
+func NewPrio(attrs QdiscAttrs) *Prio {
+ return &Prio{
+ QdiscAttrs: attrs,
+ Bands: 3,
+ PriorityMap: [PRIORITY_MAP_LEN]uint8{1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
+ }
+}
+
+func (qdisc *Prio) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Prio) Type() string {
+ return "prio"
+}
+
+// Htb is a classful qdisc that rate limits based on tokens
+type Htb struct {
+ QdiscAttrs
+ Version uint32
+ Rate2Quantum uint32
+ Defcls uint32
+ Debug uint32
+ DirectPkts uint32
+}
+
+func NewHtb(attrs QdiscAttrs) *Htb {
+ return &Htb{
+ QdiscAttrs: attrs,
+ Version: 3,
+ Defcls: 0,
+ Rate2Quantum: 10,
+ Debug: 0,
+ DirectPkts: 0,
+ }
+}
+
+func (qdisc *Htb) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Htb) Type() string {
+ return "htb"
+}
+
+// Netem is a classless qdisc that emulates network conditions such as
+// delay, packet loss, duplication, and reordering.
+
+type NetemQdiscAttrs struct {
+ Latency uint32 // in us
+ DelayCorr float32 // in %
+ Limit uint32
+ Loss float32 // in %
+ LossCorr float32 // in %
+ Gap uint32
+ Duplicate float32 // in %
+ DuplicateCorr float32 // in %
+ Jitter uint32 // in us
+ ReorderProb float32 // in %
+ ReorderCorr float32 // in %
+ CorruptProb float32 // in %
+ CorruptCorr float32 // in %
+}
+
+func (q NetemQdiscAttrs) String() string {
+ return fmt.Sprintf(
+ "{Latency: %d, Limit: %d, Loss: %f, Gap: %d, Duplicate: %f, Jitter: %d}",
+ q.Latency, q.Limit, q.Loss, q.Gap, q.Duplicate, q.Jitter,
+ )
+}
+
+type Netem struct {
+ QdiscAttrs
+ Latency uint32
+ DelayCorr uint32
+ Limit uint32
+ Loss uint32
+ LossCorr uint32
+ Gap uint32
+ Duplicate uint32
+ DuplicateCorr uint32
+ Jitter uint32
+ ReorderProb uint32
+ ReorderCorr uint32
+ CorruptProb uint32
+ CorruptCorr uint32
+}
+
+func (qdisc *Netem) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Netem) Type() string {
+ return "netem"
+}
+
+// Tbf is a classless qdisc that rate limits based on tokens
+type Tbf struct {
+ QdiscAttrs
+ Rate uint64
+ Limit uint32
+ Buffer uint32
+ Peakrate uint64
+ Minburst uint32
+ // TODO: handle other settings
+}
+
+func (qdisc *Tbf) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Tbf) Type() string {
+ return "tbf"
+}
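+
+// A minimal sketch (Rate is in bytes per second; the ifindex and sizes
+// here are only illustrative; QdiscAdd is defined in qdisc_linux.go):
+//
+//	tbf := &Tbf{
+//		QdiscAttrs: QdiscAttrs{LinkIndex: 1, Handle: MakeHandle(1, 0), Parent: HANDLE_ROOT},
+//		Rate:       131072, // ~1 Mbit/s
+//		Limit:      0x7000,
+//		Buffer:     0x2800,
+//	}
+//	// err := QdiscAdd(tbf)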
+
+// Ingress is a qdisc for adding ingress filters
+type Ingress struct {
+ QdiscAttrs
+}
+
+func (qdisc *Ingress) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Ingress) Type() string {
+ return "ingress"
+}
+
+// GenericQdisc represents a qdisc type that is not currently understood
+// by this netlink library.
+type GenericQdisc struct {
+ QdiscAttrs
+ QdiscType string
+}
+
+func (qdisc *GenericQdisc) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *GenericQdisc) Type() string {
+ return qdisc.QdiscType
+}
diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
new file mode 100644
index 000000000..2c0deddb3
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
@@ -0,0 +1,527 @@
+package netlink
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// NOTE: this constructor lives in the linux-specific file because it
+// uses linux-only helpers such as time2Tick
+func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem {
+ var limit uint32 = 1000
+ var lossCorr, delayCorr, duplicateCorr uint32
+ var reorderProb, reorderCorr uint32
+ var corruptProb, corruptCorr uint32
+
+ latency := nattrs.Latency
+ loss := Percentage2u32(nattrs.Loss)
+ gap := nattrs.Gap
+ duplicate := Percentage2u32(nattrs.Duplicate)
+ jitter := nattrs.Jitter
+
+ // Correlation
+ if latency > 0 && jitter > 0 {
+ delayCorr = Percentage2u32(nattrs.DelayCorr)
+ }
+ if loss > 0 {
+ lossCorr = Percentage2u32(nattrs.LossCorr)
+ }
+ if duplicate > 0 {
+ duplicateCorr = Percentage2u32(nattrs.DuplicateCorr)
+ }
+ // FIXME: should validate values (e.g. that loss/duplicate are valid percentages)
+ latency = time2Tick(latency)
+
+ if nattrs.Limit != 0 {
+ limit = nattrs.Limit
+ }
+ // Jitter is only valid if latency > 0
+ if latency > 0 {
+ jitter = time2Tick(jitter)
+ }
+
+ reorderProb = Percentage2u32(nattrs.ReorderProb)
+ reorderCorr = Percentage2u32(nattrs.ReorderCorr)
+
+ if reorderProb > 0 {
+ // the kernel rejects reordering when latency == 0
+ if gap == 0 {
+ gap = 1
+ }
+ }
+
+ corruptProb = Percentage2u32(nattrs.CorruptProb)
+ corruptCorr = Percentage2u32(nattrs.CorruptCorr)
+
+ return &Netem{
+ QdiscAttrs: attrs,
+ Latency: latency,
+ DelayCorr: delayCorr,
+ Limit: limit,
+ Loss: loss,
+ LossCorr: lossCorr,
+ Gap: gap,
+ Duplicate: duplicate,
+ DuplicateCorr: duplicateCorr,
+ Jitter: jitter,
+ ReorderProb: reorderProb,
+ ReorderCorr: reorderCorr,
+ CorruptProb: corruptProb,
+ CorruptCorr: corruptCorr,
+ }
+}
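+
+// A minimal usage sketch (times in microseconds, percentages as float32;
+// the ifindex is only illustrative):
+//
+//	netem := NewNetem(
+//		QdiscAttrs{LinkIndex: 1, Handle: MakeHandle(1, 0), Parent: HANDLE_ROOT},
+//		NetemQdiscAttrs{Latency: 100000, Loss: 1.0}, // 100ms delay, 1% loss
+//	)
+//	// err := QdiscAdd(netem)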
+
+// QdiscDel will delete a qdisc from the system.
+// Equivalent to: `tc qdisc del $qdisc`
+func QdiscDel(qdisc Qdisc) error {
+ return pkgHandle.QdiscDel(qdisc)
+}
+
+// QdiscDel will delete a qdisc from the system.
+// Equivalent to: `tc qdisc del $qdisc`
+func (h *Handle) QdiscDel(qdisc Qdisc) error {
+ return h.qdiscModify(syscall.RTM_DELQDISC, 0, qdisc)
+}
+
+// QdiscChange will change a qdisc in place
+// Equivalent to: `tc qdisc change $qdisc`
+// The parent and handle MUST NOT be changed.
+func QdiscChange(qdisc Qdisc) error {
+ return pkgHandle.QdiscChange(qdisc)
+}
+
+// QdiscChange will change a qdisc in place
+// Equivalent to: `tc qdisc change $qdisc`
+// The parent and handle MUST NOT be changed.
+func (h *Handle) QdiscChange(qdisc Qdisc) error {
+ return h.qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc)
+}
+
+// QdiscReplace will replace a qdisc to the system.
+// Equivalent to: `tc qdisc replace $qdisc`
+// The handle MUST change.
+func QdiscReplace(qdisc Qdisc) error {
+ return pkgHandle.QdiscReplace(qdisc)
+}
+
+// QdiscReplace will replace a qdisc to the system.
+// Equivalent to: `tc qdisc replace $qdisc`
+// The handle MUST change.
+func (h *Handle) QdiscReplace(qdisc Qdisc) error {
+ return h.qdiscModify(
+ syscall.RTM_NEWQDISC,
+ syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE,
+ qdisc)
+}
+
+// QdiscAdd will add a qdisc to the system.
+// Equivalent to: `tc qdisc add $qdisc`
+func QdiscAdd(qdisc Qdisc) error {
+ return pkgHandle.QdiscAdd(qdisc)
+}
+
+// QdiscAdd will add a qdisc to the system.
+// Equivalent to: `tc qdisc add $qdisc`
+func (h *Handle) QdiscAdd(qdisc Qdisc) error {
+ return h.qdiscModify(
+ syscall.RTM_NEWQDISC,
+ syscall.NLM_F_CREATE|syscall.NLM_F_EXCL,
+ qdisc)
+}
+
+func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error {
+ req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
+ base := qdisc.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ }
+ req.AddData(msg)
+
+ // When deleting don't bother building the rest of the netlink payload
+ if cmd != syscall.RTM_DELQDISC {
+ if err := qdiscPayload(req, qdisc); err != nil {
+ return err
+ }
+ }
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
+
+ req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type())))
+
+ options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
+ if prio, ok := qdisc.(*Prio); ok {
+ tcmap := nl.TcPrioMap{
+ Bands: int32(prio.Bands),
+ Priomap: prio.PriorityMap,
+ }
+ options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize())
+ } else if tbf, ok := qdisc.(*Tbf); ok {
+ opt := nl.TcTbfQopt{}
+ opt.Rate.Rate = uint32(tbf.Rate)
+ opt.Peakrate.Rate = uint32(tbf.Peakrate)
+ opt.Limit = tbf.Limit
+ opt.Buffer = tbf.Buffer
+ nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize())
+ if tbf.Rate >= uint64(1<<32) {
+ nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(tbf.Rate))
+ }
+ if tbf.Peakrate >= uint64(1<<32) {
+ nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(tbf.Peakrate))
+ }
+ if tbf.Peakrate > 0 {
+ nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(tbf.Minburst))
+ }
+ } else if htb, ok := qdisc.(*Htb); ok {
+ opt := nl.TcHtbGlob{}
+ opt.Version = htb.Version
+ opt.Rate2Quantum = htb.Rate2Quantum
+ opt.Defcls = htb.Defcls
+ // TODO: Handle Debug properly. For now default to 0
+ opt.Debug = htb.Debug
+ opt.DirectPkts = htb.DirectPkts
+ nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize())
+ // nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize())
+ } else if netem, ok := qdisc.(*Netem); ok {
+ opt := nl.TcNetemQopt{}
+ opt.Latency = netem.Latency
+ opt.Limit = netem.Limit
+ opt.Loss = netem.Loss
+ opt.Gap = netem.Gap
+ opt.Duplicate = netem.Duplicate
+ opt.Jitter = netem.Jitter
+ options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize())
+ // Correlation
+ corr := nl.TcNetemCorr{}
+ corr.DelayCorr = netem.DelayCorr
+ corr.LossCorr = netem.LossCorr
+ corr.DupCorr = netem.DuplicateCorr
+
+ if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 {
+ nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize())
+ }
+ // Corruption
+ corruption := nl.TcNetemCorrupt{}
+ corruption.Probability = netem.CorruptProb
+ corruption.Correlation = netem.CorruptCorr
+ if corruption.Probability > 0 {
+ nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize())
+ }
+ // Reorder
+ reorder := nl.TcNetemReorder{}
+ reorder.Probability = netem.ReorderProb
+ reorder.Correlation = netem.ReorderCorr
+ if reorder.Probability > 0 {
+ nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize())
+ }
+ } else if _, ok := qdisc.(*Ingress); ok {
+ // ingress filters must use the proper handle
+ if qdisc.Attrs().Parent != HANDLE_INGRESS {
+ return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS")
+ }
+ }
+
+ req.AddData(options)
+ return nil
+}
+
+// QdiscList gets a list of qdiscs in the system.
+// Equivalent to: `tc qdisc show`.
+// The list can be filtered by link.
+func QdiscList(link Link) ([]Qdisc, error) {
+ return pkgHandle.QdiscList(link)
+}
+
+// QdiscList gets a list of qdiscs in the system.
+// Equivalent to: `tc qdisc show`.
+// The list can be filtered by link.
+func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP)
+ index := int32(0)
+ if link != nil {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ index = int32(base.Index)
+ }
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: index,
+ }
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Qdisc
+ for _, m := range msgs {
+ msg := nl.DeserializeTcMsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ // skip qdiscs from other interfaces
+ if link != nil && msg.Ifindex != index {
+ continue
+ }
+
+ base := QdiscAttrs{
+ LinkIndex: int(msg.Ifindex),
+ Handle: msg.Handle,
+ Parent: msg.Parent,
+ Refcnt: msg.Info,
+ }
+ var qdisc Qdisc
+ qdiscType := ""
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.TCA_KIND:
+ qdiscType = string(attr.Value[:len(attr.Value)-1])
+ switch qdiscType {
+ case "pfifo_fast":
+ qdisc = &PfifoFast{}
+ case "prio":
+ qdisc = &Prio{}
+ case "tbf":
+ qdisc = &Tbf{}
+ case "ingress":
+ qdisc = &Ingress{}
+ case "htb":
+ qdisc = &Htb{}
+ case "netem":
+ qdisc = &Netem{}
+ default:
+ qdisc = &GenericQdisc{QdiscType: qdiscType}
+ }
+ case nl.TCA_OPTIONS:
+ switch qdiscType {
+ case "pfifo_fast":
+ // pfifo returns TcPrioMap directly without wrapping it in rtattr
+ if err := parsePfifoFastData(qdisc, attr.Value); err != nil {
+ return nil, err
+ }
+ case "prio":
+ // prio returns TcPrioMap directly without wrapping it in rtattr
+ if err := parsePrioData(qdisc, attr.Value); err != nil {
+ return nil, err
+ }
+ case "tbf":
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ if err := parseTbfData(qdisc, data); err != nil {
+ return nil, err
+ }
+ case "htb":
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ if err := parseHtbData(qdisc, data); err != nil {
+ return nil, err
+ }
+ case "netem":
+ if err := parseNetemData(qdisc, attr.Value); err != nil {
+ return nil, err
+ }
+
+ // no options for ingress
+ }
+ }
+ }
+ *qdisc.Attrs() = base
+ res = append(res, qdisc)
+ }
+
+ return res, nil
+}
+
+func parsePfifoFastData(qdisc Qdisc, value []byte) error {
+ pfifo := qdisc.(*PfifoFast)
+ tcmap := nl.DeserializeTcPrioMap(value)
+ pfifo.PriorityMap = tcmap.Priomap
+ pfifo.Bands = uint8(tcmap.Bands)
+ return nil
+}
+
+func parsePrioData(qdisc Qdisc, value []byte) error {
+ prio := qdisc.(*Prio)
+ tcmap := nl.DeserializeTcPrioMap(value)
+ prio.PriorityMap = tcmap.Priomap
+ prio.Bands = uint8(tcmap.Bands)
+ return nil
+}
+
+func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error {
+ native = nl.NativeEndian()
+ htb := qdisc.(*Htb)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_HTB_INIT:
+ opt := nl.DeserializeTcHtbGlob(datum.Value)
+ htb.Version = opt.Version
+ htb.Rate2Quantum = opt.Rate2Quantum
+ htb.Defcls = opt.Defcls
+ htb.Debug = opt.Debug
+ htb.DirectPkts = opt.DirectPkts
+ case nl.TCA_HTB_DIRECT_QLEN:
+ // TODO
+ //htb.DirectQlen = native.uint32(datum.Value)
+ }
+ }
+ return nil
+}
+
+func parseNetemData(qdisc Qdisc, value []byte) error {
+ netem := qdisc.(*Netem)
+ opt := nl.DeserializeTcNetemQopt(value)
+ netem.Latency = opt.Latency
+ netem.Limit = opt.Limit
+ netem.Loss = opt.Loss
+ netem.Gap = opt.Gap
+ netem.Duplicate = opt.Duplicate
+ netem.Jitter = opt.Jitter
+ data, err := nl.ParseRouteAttr(value[nl.SizeofTcNetemQopt:])
+ if err != nil {
+ return err
+ }
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_NETEM_CORR:
+ opt := nl.DeserializeTcNetemCorr(datum.Value)
+ netem.DelayCorr = opt.DelayCorr
+ netem.LossCorr = opt.LossCorr
+ netem.DuplicateCorr = opt.DupCorr
+ case nl.TCA_NETEM_CORRUPT:
+ opt := nl.DeserializeTcNetemCorrupt(datum.Value)
+ netem.CorruptProb = opt.Probability
+ netem.CorruptCorr = opt.Correlation
+ case nl.TCA_NETEM_REORDER:
+ opt := nl.DeserializeTcNetemReorder(datum.Value)
+ netem.ReorderProb = opt.Probability
+ netem.ReorderCorr = opt.Correlation
+ }
+ }
+ return nil
+}
+
+func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error {
+ native = nl.NativeEndian()
+ tbf := qdisc.(*Tbf)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_TBF_PARMS:
+ opt := nl.DeserializeTcTbfQopt(datum.Value)
+ tbf.Rate = uint64(opt.Rate.Rate)
+ tbf.Peakrate = uint64(opt.Peakrate.Rate)
+ tbf.Limit = opt.Limit
+ tbf.Buffer = opt.Buffer
+ case nl.TCA_TBF_RATE64:
+ tbf.Rate = native.Uint64(datum.Value[0:8])
+ case nl.TCA_TBF_PRATE64:
+ tbf.Peakrate = native.Uint64(datum.Value[0:8])
+ case nl.TCA_TBF_PBURST:
+ tbf.Minburst = native.Uint32(datum.Value[0:4])
+ }
+ }
+ return nil
+}
+
+const (
+ TIME_UNITS_PER_SEC = 1000000
+)
+
+var (
+ tickInUsec float64
+ clockFactor float64
+ hz float64
+)
+
+func initClock() {
+ data, err := ioutil.ReadFile("/proc/net/psched")
+ if err != nil {
+ return
+ }
+ parts := strings.Split(strings.TrimSpace(string(data)), " ")
+ if len(parts) < 3 {
+ return
+ }
+ var vals [3]uint64
+ for i := range vals {
+ val, err := strconv.ParseUint(parts[i], 16, 32)
+ if err != nil {
+ return
+ }
+ vals[i] = val
+ }
+ // compatibility
+ if vals[2] == 1000000000 {
+ vals[0] = vals[1]
+ }
+ clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC
+ tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor
+ hz = float64(vals[0])
+}
+
+func TickInUsec() float64 {
+ if tickInUsec == 0.0 {
+ initClock()
+ }
+ return tickInUsec
+}
+
+func ClockFactor() float64 {
+ if clockFactor == 0.0 {
+ initClock()
+ }
+ return clockFactor
+}
+
+func Hz() float64 {
+ if hz == 0.0 {
+ initClock()
+ }
+ return hz
+}
+
+func time2Tick(time uint32) uint32 {
+ return uint32(float64(time) * TickInUsec())
+}
+
+func tick2Time(tick uint32) uint32 {
+ return uint32(float64(tick) / TickInUsec())
+}
+
+func time2Ktime(time uint32) uint32 {
+ return uint32(float64(time) * ClockFactor())
+}
+
+func ktime2Time(ktime uint32) uint32 {
+ return uint32(float64(ktime) / ClockFactor())
+}
+
+func burst(rate uint64, buffer uint32) uint32 {
+ return uint32(float64(rate) * float64(tick2Time(buffer)) / TIME_UNITS_PER_SEC)
+}
+
+func latency(rate uint64, limit, buffer uint32) float64 {
+ return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer))
+}
+
+func Xmittime(rate uint64, size uint32) float64 {
+ return TickInUsec() * TIME_UNITS_PER_SEC * (float64(size) / float64(rate))
+}
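+
+// For example, assuming TickInUsec() == 1, burst(125000, 10000) converts
+// a 10000-tick (10ms) buffer at 125000 bytes/s into a burst of 1250 bytes.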
diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go
new file mode 100644
index 000000000..03ac4b239
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/route.go
@@ -0,0 +1,116 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// Scope is an enum representing a route scope.
+type Scope uint8
+
+type NextHopFlag int
+
+type Destination interface {
+ Family() int
+ Decode([]byte) error
+ Encode() ([]byte, error)
+ String() string
+}
+
+type Encap interface {
+ Type() int
+ Decode([]byte) error
+ Encode() ([]byte, error)
+ String() string
+}
+
+// Route represents a netlink route.
+type Route struct {
+ LinkIndex int
+ ILinkIndex int
+ Scope Scope
+ Dst *net.IPNet
+ Src net.IP
+ Gw net.IP
+ MultiPath []*NexthopInfo
+ Protocol int
+ Priority int
+ Table int
+ Type int
+ Tos int
+ Flags int
+ MPLSDst *int
+ NewDst Destination
+ Encap Encap
+}
+
+func (r Route) String() string {
+ elems := []string{}
+ if len(r.MultiPath) == 0 {
+ elems = append(elems, fmt.Sprintf("Ifindex: %d", r.LinkIndex))
+ }
+ if r.MPLSDst != nil {
+ elems = append(elems, fmt.Sprintf("Dst: %d", r.MPLSDst))
+ } else {
+ elems = append(elems, fmt.Sprintf("Dst: %s", r.Dst))
+ }
+ if r.NewDst != nil {
+ elems = append(elems, fmt.Sprintf("NewDst: %s", r.NewDst))
+ }
+ if r.Encap != nil {
+ elems = append(elems, fmt.Sprintf("Encap: %s", r.Encap))
+ }
+ elems = append(elems, fmt.Sprintf("Src: %s", r.Src))
+ if len(r.MultiPath) > 0 {
+ elems = append(elems, fmt.Sprintf("Gw: %s", r.MultiPath))
+ } else {
+ elems = append(elems, fmt.Sprintf("Gw: %s", r.Gw))
+ }
+ elems = append(elems, fmt.Sprintf("Flags: %s", r.ListFlags()))
+ elems = append(elems, fmt.Sprintf("Table: %d", r.Table))
+ return fmt.Sprintf("{%s}", strings.Join(elems, " "))
+}
+
+func (r *Route) SetFlag(flag NextHopFlag) {
+ r.Flags |= int(flag)
+}
+
+func (r *Route) ClearFlag(flag NextHopFlag) {
+ r.Flags &^= int(flag)
+}
+
+type flagString struct {
+ f NextHopFlag
+ s string
+}
+
+// RouteUpdate is sent when a route changes - type is RTM_NEWROUTE or RTM_DELROUTE
+type RouteUpdate struct {
+ Type uint16
+ Route
+}
+
+type NexthopInfo struct {
+ LinkIndex int
+ Hops int
+ Gw net.IP
+ Flags int
+ NewDst Destination
+ Encap Encap
+}
+
+func (n *NexthopInfo) String() string {
+ elems := []string{}
+ elems = append(elems, fmt.Sprintf("Ifindex: %d", n.LinkIndex))
+ if n.NewDst != nil {
+ elems = append(elems, fmt.Sprintf("NewDst: %s", n.NewDst))
+ }
+ if n.Encap != nil {
+ elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap))
+ }
+ elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1))
+ elems = append(elems, fmt.Sprintf("Gw: %d", n.Gw))
+ elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags()))
+ return fmt.Sprintf("{%s}", strings.Join(elems, " "))
+}
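+
+// A minimal sketch of building a route for RouteAdd (defined in
+// route_linux.go); the addresses and ifindex are only illustrative:
+//
+//	_, dst, _ := net.ParseCIDR("192.168.0.0/24")
+//	r := &Route{LinkIndex: 2, Dst: dst, Gw: net.ParseIP("10.0.0.1")}
+//	// err := RouteAdd(r)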
diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go
new file mode 100644
index 000000000..cd739e714
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/route_linux.go
@@ -0,0 +1,674 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
+)
+
+// RtAttr is shared so it is in netlink_linux.go
+
+const (
+ SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE
+ SCOPE_SITE Scope = syscall.RT_SCOPE_SITE
+ SCOPE_LINK Scope = syscall.RT_SCOPE_LINK
+ SCOPE_HOST Scope = syscall.RT_SCOPE_HOST
+ SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE
+)
+
+const (
+ RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota)
+ RT_FILTER_SCOPE
+ RT_FILTER_TYPE
+ RT_FILTER_TOS
+ RT_FILTER_IIF
+ RT_FILTER_OIF
+ RT_FILTER_DST
+ RT_FILTER_SRC
+ RT_FILTER_GW
+ RT_FILTER_TABLE
+)
+
+const (
+ FLAG_ONLINK NextHopFlag = syscall.RTNH_F_ONLINK
+ FLAG_PERVASIVE NextHopFlag = syscall.RTNH_F_PERVASIVE
+)
+
+var testFlags = []flagString{
+ {f: FLAG_ONLINK, s: "onlink"},
+ {f: FLAG_PERVASIVE, s: "pervasive"},
+}
+
+func listFlags(flag int) []string {
+ var flags []string
+ for _, tf := range testFlags {
+ if flag&int(tf.f) != 0 {
+ flags = append(flags, tf.s)
+ }
+ }
+ return flags
+}
+
+func (r *Route) ListFlags() []string {
+ return listFlags(r.Flags)
+}
+
+func (n *NexthopInfo) ListFlags() []string {
+ return listFlags(n.Flags)
+}
+
+type MPLSDestination struct {
+ Labels []int
+}
+
+func (d *MPLSDestination) Family() int {
+ return nl.FAMILY_MPLS
+}
+
+func (d *MPLSDestination) Decode(buf []byte) error {
+ d.Labels = nl.DecodeMPLSStack(buf)
+ return nil
+}
+
+func (d *MPLSDestination) Encode() ([]byte, error) {
+ return nl.EncodeMPLSStack(d.Labels...), nil
+}
+
+func (d *MPLSDestination) String() string {
+ s := make([]string, 0, len(d.Labels))
+ for _, l := range d.Labels {
+ s = append(s, fmt.Sprintf("%d", l))
+ }
+ return strings.Join(s, "/")
+}
+
+type MPLSEncap struct {
+ Labels []int
+}
+
+func (e *MPLSEncap) Type() int {
+ return nl.LWTUNNEL_ENCAP_MPLS
+}
+
+func (e *MPLSEncap) Decode(buf []byte) error {
+ if len(buf) < 4 {
+ return fmt.Errorf("Lack of bytes")
+ }
+ native := nl.NativeEndian()
+ l := native.Uint16(buf)
+ if len(buf) < int(l) {
+ return fmt.Errorf("Lack of bytes")
+ }
+ buf = buf[:l]
+ typ := native.Uint16(buf[2:])
+ if typ != nl.MPLS_IPTUNNEL_DST {
+ return fmt.Errorf("Unknown MPLS Encap Type: %d", typ)
+ }
+ e.Labels = nl.DecodeMPLSStack(buf[4:])
+ return nil
+}
+
+func (e *MPLSEncap) Encode() ([]byte, error) {
+ s := nl.EncodeMPLSStack(e.Labels...)
+ native := nl.NativeEndian()
+ hdr := make([]byte, 4)
+ native.PutUint16(hdr, uint16(len(s)+4))
+ native.PutUint16(hdr[2:], nl.MPLS_IPTUNNEL_DST)
+ return append(hdr, s...), nil
+}
+
+func (e *MPLSEncap) String() string {
+ s := make([]string, 0, len(e.Labels))
+ for _, l := range e.Labels {
+ s = append(s, fmt.Sprintf("%d", l))
+ }
+ return strings.Join(s, "/")
+}
+
+// RouteAdd will add a route to the system.
+// Equivalent to: `ip route add $route`
+func RouteAdd(route *Route) error {
+ return pkgHandle.RouteAdd(route)
+}
+
+// RouteAdd will add a route to the system.
+// Equivalent to: `ip route add $route`
+func (h *Handle) RouteAdd(route *Route) error {
+ flags := syscall.NLM_F_CREATE | syscall.NLM_F_EXCL | syscall.NLM_F_ACK
+ req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags)
+ return h.routeHandle(route, req, nl.NewRtMsg())
+}
+
+// RouteReplace will add a route to the system.
+// Equivalent to: `ip route replace $route`
+func RouteReplace(route *Route) error {
+ return pkgHandle.RouteReplace(route)
+}
+
+// RouteReplace will add a route to the system.
+// Equivalent to: `ip route replace $route`
+func (h *Handle) RouteReplace(route *Route) error {
+ flags := syscall.NLM_F_CREATE | syscall.NLM_F_REPLACE | syscall.NLM_F_ACK
+ req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags)
+ return h.routeHandle(route, req, nl.NewRtMsg())
+}
+
+// RouteDel will delete a route from the system.
+// Equivalent to: `ip route del $route`
+func RouteDel(route *Route) error {
+ return pkgHandle.RouteDel(route)
+}
+
+// RouteDel will delete a route from the system.
+// Equivalent to: `ip route del $route`
+func (h *Handle) RouteDel(route *Route) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK)
+ return h.routeHandle(route, req, nl.NewRtDelMsg())
+}
+
+func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
+ if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil {
+ return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil")
+ }
+
+ family := -1
+ var rtAttrs []*nl.RtAttr
+
+ if route.Dst != nil && route.Dst.IP != nil {
+ dstLen, _ := route.Dst.Mask.Size()
+ msg.Dst_len = uint8(dstLen)
+ dstFamily := nl.GetIPFamily(route.Dst.IP)
+ family = dstFamily
+ var dstData []byte
+ if dstFamily == FAMILY_V4 {
+ dstData = route.Dst.IP.To4()
+ } else {
+ dstData = route.Dst.IP.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData))
+ } else if route.MPLSDst != nil {
+ family = nl.FAMILY_MPLS
+ msg.Dst_len = uint8(20)
+ msg.Type = syscall.RTN_UNICAST
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst)))
+ }
+
+ if route.NewDst != nil {
+ if family != -1 && family != route.NewDst.Family() {
+ return fmt.Errorf("new destination and destination are not the same address family")
+ }
+ buf, err := route.NewDst.Encode()
+ if err != nil {
+ return err
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_NEWDST, buf))
+ }
+
+ if route.Encap != nil {
+ buf := make([]byte, 2)
+ native.PutUint16(buf, uint16(route.Encap.Type()))
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf))
+ buf, err := route.Encap.Encode()
+ if err != nil {
+ return err
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP, buf))
+ }
+
+ if route.Src != nil {
+ srcFamily := nl.GetIPFamily(route.Src)
+ if family != -1 && family != srcFamily {
+ return fmt.Errorf("source and destination ip are not the same IP family")
+ }
+ family = srcFamily
+ var srcData []byte
+ if srcFamily == FAMILY_V4 {
+ srcData = route.Src.To4()
+ } else {
+ srcData = route.Src.To16()
+ }
+ // The commonly used src ip for routes is actually PREFSRC
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData))
+ }
+
+ if route.Gw != nil {
+ gwFamily := nl.GetIPFamily(route.Gw)
+ if family != -1 && family != gwFamily {
+ return fmt.Errorf("gateway, source, and destination ip are not the same IP family")
+ }
+ family = gwFamily
+ var gwData []byte
+ if gwFamily == FAMILY_V4 {
+ gwData = route.Gw.To4()
+ } else {
+ gwData = route.Gw.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData))
+ }
+
+ if len(route.MultiPath) > 0 {
+ buf := []byte{}
+ for _, nh := range route.MultiPath {
+ rtnh := &nl.RtNexthop{
+ RtNexthop: syscall.RtNexthop{
+ Hops: uint8(nh.Hops),
+ Ifindex: int32(nh.LinkIndex),
+ Flags: uint8(nh.Flags),
+ },
+ }
+ children := []nl.NetlinkRequestData{}
+ if nh.Gw != nil {
+ gwFamily := nl.GetIPFamily(nh.Gw)
+ if family != -1 && family != gwFamily {
+ return fmt.Errorf("gateway, source, and destination ip are not the same IP family")
+ }
+ if gwFamily == FAMILY_V4 {
+ children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To4())))
+ } else {
+ children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To16())))
+ }
+ }
+ if nh.NewDst != nil {
+ if family != -1 && family != nh.NewDst.Family() {
+ return fmt.Errorf("new destination and destination are not the same address family")
+ }
+ buf, err := nh.NewDst.Encode()
+ if err != nil {
+ return err
+ }
+ children = append(children, nl.NewRtAttr(nl.RTA_NEWDST, buf))
+ }
+ if nh.Encap != nil {
+ buf := make([]byte, 2)
+ native.PutUint16(buf, uint16(nh.Encap.Type()))
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf))
+ buf, err := nh.Encap.Encode()
+ if err != nil {
+ return err
+ }
+ children = append(children, nl.NewRtAttr(nl.RTA_ENCAP, buf))
+ }
+ rtnh.Children = children
+ buf = append(buf, rtnh.Serialize()...)
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_MULTIPATH, buf))
+ }
+
+ if route.Table > 0 {
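+ // Table IDs above 255 don't fit the rtmsg's uint8 Table field,
+ // so they are sent in an RTA_TABLE attribute instead.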
+ if route.Table >= 256 {
+ msg.Table = syscall.RT_TABLE_UNSPEC
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(route.Table))
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_TABLE, b))
+ } else {
+ msg.Table = uint8(route.Table)
+ }
+ }
+
+ if route.Priority > 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(route.Priority))
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PRIORITY, b))
+ }
+ if route.Tos > 0 {
+ msg.Tos = uint8(route.Tos)
+ }
+ if route.Protocol > 0 {
+ msg.Protocol = uint8(route.Protocol)
+ }
+ if route.Type > 0 {
+ msg.Type = uint8(route.Type)
+ }
+
+ msg.Flags = uint32(route.Flags)
+ msg.Scope = uint8(route.Scope)
+ msg.Family = uint8(family)
+ req.AddData(msg)
+ for _, attr := range rtAttrs {
+ req.AddData(attr)
+ }
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(route.LinkIndex))
+
+ req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b))
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// RouteList gets a list of routes in the system.
+// Equivalent to: `ip route show`.
+// The list can be filtered by link and ip family.
+func RouteList(link Link, family int) ([]Route, error) {
+ return pkgHandle.RouteList(link, family)
+}
+
+// RouteList gets a list of routes in the system.
+// Equivalent to: `ip route show`.
+// The list can be filtered by link and ip family.
+func (h *Handle) RouteList(link Link, family int) ([]Route, error) {
+ var routeFilter *Route
+ if link != nil {
+ routeFilter = &Route{
+ LinkIndex: link.Attrs().Index,
+ }
+ }
+ return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF)
+}
+
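+// Illustrative usage (a sketch; "eth0" is an assumed interface name):
+//
+//	link, _ := netlink.LinkByName("eth0")
+//	routes, err := netlink.RouteList(link, netlink.FAMILY_V4)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, r := range routes {
+//		fmt.Println(r.Dst, r.Gw)
+//	}
+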
+// RouteListFiltered gets a list of routes in the system filtered with specified rules.
+// All filter fields are defined in the Route struct; filterMask selects which of them to match on.
+func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
+ return pkgHandle.RouteListFiltered(family, filter, filterMask)
+}
+
+// RouteListFiltered gets a list of routes in the system filtered with specified rules.
+// All filter fields are defined in the Route struct; filterMask selects which of them to match on.
+func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)
+ infmsg := nl.NewIfInfomsg(family)
+ req.AddData(infmsg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Route
+ for _, m := range msgs {
+ msg := nl.DeserializeRtMsg(m)
+ if msg.Flags&syscall.RTM_F_CLONED != 0 {
+ // Ignore cloned routes
+ continue
+ }
+ if msg.Table != syscall.RT_TABLE_MAIN {
+ if filter == nil || filterMask&RT_FILTER_TABLE == 0 {
+ // Ignore non-main tables
+ continue
+ }
+ }
+ route, err := deserializeRoute(m)
+ if err != nil {
+ return nil, err
+ }
+ if filter != nil {
+ switch {
+ case filterMask&RT_FILTER_TABLE != 0 && filter.Table != syscall.RT_TABLE_UNSPEC && route.Table != filter.Table:
+ continue
+ case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol:
+ continue
+ case filterMask&RT_FILTER_SCOPE != 0 && route.Scope != filter.Scope:
+ continue
+ case filterMask&RT_FILTER_TYPE != 0 && route.Type != filter.Type:
+ continue
+ case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos:
+ continue
+ case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex:
+ continue
+ case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex:
+ continue
+ case filterMask&RT_FILTER_GW != 0 && !route.Gw.Equal(filter.Gw):
+ continue
+ case filterMask&RT_FILTER_SRC != 0 && !route.Src.Equal(filter.Src):
+ continue
+ case filterMask&RT_FILTER_DST != 0:
+ if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) {
+ if filter.Dst == nil {
+ if route.Dst != nil {
+ continue
+ }
+ } else {
+ if route.Dst == nil {
+ continue
+ }
+ aMaskLen, aMaskBits := route.Dst.Mask.Size()
+ bMaskLen, bMaskBits := filter.Dst.Mask.Size()
+ if !(route.Dst.IP.Equal(filter.Dst.IP) && aMaskLen == bMaskLen && aMaskBits == bMaskBits) {
+ continue
+ }
+ }
+ }
+ }
+ }
+ res = append(res, route)
+ }
+ return res, nil
+}
+
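+// Illustrative filtered dump (a sketch; the table number is an assumption):
+//
+//	filter := &netlink.Route{Table: 254}
+//	routes, err := netlink.RouteListFiltered(netlink.FAMILY_V4, filter,
+//		netlink.RT_FILTER_TABLE)
+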
+// deserializeRoute decodes a binary netlink message into a Route struct
+func deserializeRoute(m []byte) (Route, error) {
+ msg := nl.DeserializeRtMsg(m)
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return Route{}, err
+ }
+ route := Route{
+ Scope: Scope(msg.Scope),
+ Protocol: int(msg.Protocol),
+ Table: int(msg.Table),
+ Type: int(msg.Type),
+ Tos: int(msg.Tos),
+ Flags: int(msg.Flags),
+ }
+
+ native := nl.NativeEndian()
+ var encap, encapType syscall.NetlinkRouteAttr
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.RTA_GATEWAY:
+ route.Gw = net.IP(attr.Value)
+ case syscall.RTA_PREFSRC:
+ route.Src = net.IP(attr.Value)
+ case syscall.RTA_DST:
+ if msg.Family == nl.FAMILY_MPLS {
+ stack := nl.DecodeMPLSStack(attr.Value)
+ if len(stack) != 1 {
+ return route, fmt.Errorf("invalid MPLS RTA_DST")
+ }
+ route.MPLSDst = &stack[0]
+ } else {
+ route.Dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)),
+ }
+ }
+ case syscall.RTA_OIF:
+ route.LinkIndex = int(native.Uint32(attr.Value[0:4]))
+ case syscall.RTA_IIF:
+ route.ILinkIndex = int(native.Uint32(attr.Value[0:4]))
+ case syscall.RTA_PRIORITY:
+ route.Priority = int(native.Uint32(attr.Value[0:4]))
+ case syscall.RTA_TABLE:
+ route.Table = int(native.Uint32(attr.Value[0:4]))
+ case syscall.RTA_MULTIPATH:
+ parseRtNexthop := func(value []byte) (*NexthopInfo, []byte, error) {
+ if len(value) < syscall.SizeofRtNexthop {
+ return nil, nil, fmt.Errorf("lack of bytes: value shorter than an RtNexthop header")
+ }
+ nh := nl.DeserializeRtNexthop(value)
+ if len(value) < int(nh.RtNexthop.Len) {
+ return nil, nil, fmt.Errorf("lack of bytes: truncated RtNexthop payload")
+ }
+ info := &NexthopInfo{
+ LinkIndex: int(nh.RtNexthop.Ifindex),
+ Hops: int(nh.RtNexthop.Hops),
+ Flags: int(nh.RtNexthop.Flags),
+ }
+ attrs, err := nl.ParseRouteAttr(value[syscall.SizeofRtNexthop:int(nh.RtNexthop.Len)])
+ if err != nil {
+ return nil, nil, err
+ }
+ var encap, encapType syscall.NetlinkRouteAttr
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.RTA_GATEWAY:
+ info.Gw = net.IP(attr.Value)
+ case nl.RTA_NEWDST:
+ var d Destination
+ switch msg.Family {
+ case nl.FAMILY_MPLS:
+ d = &MPLSDestination{}
+ default:
+ // Without a concrete Destination the Decode below would
+ // dereference a nil interface, so fail explicitly.
+ return nil, nil, fmt.Errorf("unsupported RTA_NEWDST family %d", msg.Family)
+ }
+ if err := d.Decode(attr.Value); err != nil {
+ return nil, nil, err
+ }
+ info.NewDst = d
+ case nl.RTA_ENCAP_TYPE:
+ encapType = attr
+ case nl.RTA_ENCAP:
+ encap = attr
+ }
+ }
+
+ if len(encap.Value) != 0 && len(encapType.Value) != 0 {
+ typ := int(native.Uint16(encapType.Value[0:2]))
+ var e Encap
+ switch typ {
+ case nl.LWTUNNEL_ENCAP_MPLS:
+ e = &MPLSEncap{}
+ if err := e.Decode(encap.Value); err != nil {
+ return nil, nil, err
+ }
+ }
+ info.Encap = e
+ }
+
+ return info, value[int(nh.RtNexthop.Len):], nil
+ }
+ rest := attr.Value
+ for len(rest) > 0 {
+ info, buf, err := parseRtNexthop(rest)
+ if err != nil {
+ return route, err
+ }
+ route.MultiPath = append(route.MultiPath, info)
+ rest = buf
+ }
+ case nl.RTA_NEWDST:
+ var d Destination
+ switch msg.Family {
+ case nl.FAMILY_MPLS:
+ d = &MPLSDestination{}
+ default:
+ // Avoid calling Decode on a nil Destination interface.
+ return route, fmt.Errorf("unsupported RTA_NEWDST family %d", msg.Family)
+ }
+ if err := d.Decode(attr.Value); err != nil {
+ return route, err
+ }
+ route.NewDst = d
+ case nl.RTA_ENCAP_TYPE:
+ encapType = attr
+ case nl.RTA_ENCAP:
+ encap = attr
+ }
+ }
+
+ if len(encap.Value) != 0 && len(encapType.Value) != 0 {
+ typ := int(native.Uint16(encapType.Value[0:2]))
+ var e Encap
+ switch typ {
+ case nl.LWTUNNEL_ENCAP_MPLS:
+ e = &MPLSEncap{}
+ if err := e.Decode(encap.Value); err != nil {
+ return route, err
+ }
+ }
+ route.Encap = e
+ }
+
+ return route, nil
+}
+
+// RouteGet gets a route to a specific destination from the host system.
+// Equivalent to: 'ip route get'.
+func RouteGet(destination net.IP) ([]Route, error) {
+ return pkgHandle.RouteGet(destination)
+}
+
+// RouteGet gets a route to a specific destination from the host system.
+// Equivalent to: 'ip route get'.
+func (h *Handle) RouteGet(destination net.IP) ([]Route, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST)
+ family := nl.GetIPFamily(destination)
+ var destinationData []byte
+ var bitlen uint8
+ if family == FAMILY_V4 {
+ destinationData = destination.To4()
+ bitlen = 32
+ } else {
+ destinationData = destination.To16()
+ bitlen = 128
+ }
+ msg := &nl.RtMsg{}
+ msg.Family = uint8(family)
+ msg.Dst_len = bitlen
+ req.AddData(msg)
+
+ rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData)
+ req.AddData(rtaDst)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Route
+ for _, m := range msgs {
+ route, err := deserializeRoute(m)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, route)
+ }
+ return res, nil
+}
+
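+// Illustrative lookup (a sketch; the destination address is arbitrary):
+//
+//	routes, err := netlink.RouteGet(net.ParseIP("8.8.8.8"))
+//	if err == nil && len(routes) > 0 {
+//		fmt.Println("next hop:", routes[0].Gw)
+//	}
+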
+// RouteSubscribe takes a chan down which notifications will be sent
+// when routes are added or deleted. Close the 'done' chan to stop subscription.
+func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error {
+ return routeSubscribeAt(netns.None(), netns.None(), ch, done)
+}
+
+// RouteSubscribeAt works like RouteSubscribe plus it allows the caller
+// to choose the network namespace in which to subscribe (ns).
+func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
+ return routeSubscribeAt(ns, netns.None(), ch, done)
+}
+
+func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
+ s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE)
+ if err != nil {
+ return err
+ }
+ if done != nil {
+ go func() {
+ <-done
+ s.Close()
+ }()
+ }
+ go func() {
+ defer close(ch)
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ return
+ }
+ for _, m := range msgs {
+ route, err := deserializeRoute(m.Data)
+ if err != nil {
+ return
+ }
+ ch <- RouteUpdate{Type: m.Header.Type, Route: route}
+ }
+ }
+ }()
+
+ return nil
+}
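+
+// Illustrative subscription (a sketch): route updates arrive on ch until
+// the done channel is closed.
+//
+//	ch := make(chan netlink.RouteUpdate)
+//	done := make(chan struct{})
+//	if err := netlink.RouteSubscribe(ch, done); err == nil {
+//		for update := range ch {
+//			fmt.Println(update.Type, update.Route.Dst)
+//		}
+//	}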
diff --git a/vendor/github.com/vishvananda/netlink/route_unspecified.go b/vendor/github.com/vishvananda/netlink/route_unspecified.go
new file mode 100644
index 000000000..2701862b4
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/route_unspecified.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package netlink
+
+func (r *Route) ListFlags() []string {
+ return []string{}
+}
+
+func (n *NexthopInfo) ListFlags() []string {
+ return []string{}
+}
diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go
new file mode 100644
index 000000000..f0243defd
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/rule.go
@@ -0,0 +1,40 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Rule represents a netlink rule.
+type Rule struct {
+ Priority int
+ Table int
+ Mark int
+ Mask int
+ TunID uint
+ Goto int
+ Src *net.IPNet
+ Dst *net.IPNet
+ Flow int
+ IifName string
+ OifName string
+ SuppressIfgroup int
+ SuppressPrefixlen int
+}
+
+func (r Rule) String() string {
+ return fmt.Sprintf("ip rule %d: from %s table %d", r.Priority, r.Src, r.Table)
+}
+
+// NewRule returns an empty Rule with all optional fields set to their sentinel values.
+func NewRule() *Rule {
+ return &Rule{
+ SuppressIfgroup: -1,
+ SuppressPrefixlen: -1,
+ Priority: -1,
+ Mark: -1,
+ Mask: -1,
+ Goto: -1,
+ Flow: -1,
+ }
+}
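+
+// Illustrative usage (a sketch; the table number and source network are assumptions):
+//
+//	rule := netlink.NewRule()
+//	rule.Table = 100
+//	_, src, _ := net.ParseCIDR("192.168.1.0/24")
+//	rule.Src = src
+//	// On Linux the rule can then be installed with netlink.RuleAdd(rule).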
diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go
new file mode 100644
index 000000000..f9cdc855f
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/rule_linux.go
@@ -0,0 +1,221 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// RuleAdd adds a rule to the system.
+// Equivalent to: ip rule add
+func RuleAdd(rule *Rule) error {
+ return pkgHandle.RuleAdd(rule)
+}
+
+// RuleAdd adds a rule to the system.
+// Equivalent to: ip rule add
+func (h *Handle) RuleAdd(rule *Rule) error {
+ req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return ruleHandle(rule, req)
+}
+
+// RuleDel deletes a rule from the system.
+// Equivalent to: ip rule del
+func RuleDel(rule *Rule) error {
+ return pkgHandle.RuleDel(rule)
+}
+
+// RuleDel deletes a rule from the system.
+// Equivalent to: ip rule del
+func (h *Handle) RuleDel(rule *Rule) error {
+ req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return ruleHandle(rule, req)
+}
+
+func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
+ msg := nl.NewRtMsg()
+ msg.Family = syscall.AF_INET
+ var dstFamily uint8
+
+ var rtAttrs []*nl.RtAttr
+ if rule.Dst != nil && rule.Dst.IP != nil {
+ dstLen, _ := rule.Dst.Mask.Size()
+ msg.Dst_len = uint8(dstLen)
+ msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP))
+ dstFamily = msg.Family
+ var dstData []byte
+ if msg.Family == syscall.AF_INET {
+ dstData = rule.Dst.IP.To4()
+ } else {
+ dstData = rule.Dst.IP.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData))
+ }
+
+ if rule.Src != nil && rule.Src.IP != nil {
+ msg.Family = uint8(nl.GetIPFamily(rule.Src.IP))
+ if dstFamily != 0 && dstFamily != msg.Family {
+ return fmt.Errorf("source and destination ip are not the same IP family")
+ }
+ srcLen, _ := rule.Src.Mask.Size()
+ msg.Src_len = uint8(srcLen)
+ var srcData []byte
+ if msg.Family == syscall.AF_INET {
+ srcData = rule.Src.IP.To4()
+ } else {
+ srcData = rule.Src.IP.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData))
+ }
+
+ if rule.Table >= 0 {
+ msg.Table = uint8(rule.Table)
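+ // Tables above 255 overflow the uint8 field; RT_TABLE_UNSPEC is set
+ // here and the real ID is sent via an FRA_TABLE attribute below.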
+ if rule.Table >= 256 {
+ msg.Table = syscall.RT_TABLE_UNSPEC
+ }
+ }
+
+ req.AddData(msg)
+ for i := range rtAttrs {
+ req.AddData(rtAttrs[i])
+ }
+
+ native := nl.NativeEndian()
+
+ if rule.Priority >= 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.Priority))
+ req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b))
+ }
+ if rule.Mark >= 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.Mark))
+ req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b))
+ }
+ if rule.Mask >= 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.Mask))
+ req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b))
+ }
+ if rule.Flow >= 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.Flow))
+ req.AddData(nl.NewRtAttr(nl.FRA_FLOW, b))
+ }
+ if rule.TunID > 0 {
+ // FRA_TUN_ID carries a 64-bit tunnel ID.
+ b := make([]byte, 8)
+ native.PutUint64(b, uint64(rule.TunID))
+ req.AddData(nl.NewRtAttr(nl.FRA_TUN_ID, b))
+ }
+ if rule.Table >= 256 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.Table))
+ req.AddData(nl.NewRtAttr(nl.FRA_TABLE, b))
+ }
+ if msg.Table > 0 {
+ if rule.SuppressPrefixlen >= 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.SuppressPrefixlen))
+ req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_PREFIXLEN, b))
+ }
+ if rule.SuppressIfgroup >= 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.SuppressIfgroup))
+ req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_IFGROUP, b))
+ }
+ }
+ if rule.IifName != "" {
+ req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName)))
+ }
+ if rule.OifName != "" {
+ req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName)))
+ }
+ if rule.Goto >= 0 {
+ msg.Type = nl.FR_ACT_NOP
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(rule.Goto))
+ req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b))
+ }
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// RuleList lists rules in the system.
+// Equivalent to: ip rule list
+func RuleList(family int) ([]Rule, error) {
+ return pkgHandle.RuleList(family)
+}
+
+// RuleList lists rules in the system.
+// Equivalent to: ip rule list
+func (h *Handle) RuleList(family int) ([]Rule, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST)
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE)
+ if err != nil {
+ return nil, err
+ }
+
+ native := nl.NativeEndian()
+ var res = make([]Rule, 0)
+ for i := range msgs {
+ msg := nl.DeserializeRtMsg(msgs[i])
+ attrs, err := nl.ParseRouteAttr(msgs[i][msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ rule := NewRule()
+
+ for j := range attrs {
+ switch attrs[j].Attr.Type {
+ case syscall.RTA_TABLE:
+ rule.Table = int(native.Uint32(attrs[j].Value[0:4]))
+ case nl.FRA_SRC:
+ rule.Src = &net.IPNet{
+ IP: attrs[j].Value,
+ Mask: net.CIDRMask(int(msg.Src_len), 8*len(attrs[j].Value)),
+ }
+ case nl.FRA_DST:
+ rule.Dst = &net.IPNet{
+ IP: attrs[j].Value,
+ Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)),
+ }
+ case nl.FRA_FWMARK:
+ rule.Mark = int(native.Uint32(attrs[j].Value[0:4]))
+ case nl.FRA_FWMASK:
+ rule.Mask = int(native.Uint32(attrs[j].Value[0:4]))
+ case nl.FRA_TUN_ID:
+ rule.TunID = uint(native.Uint64(attrs[j].Value[0:8]))
+ case nl.FRA_IIFNAME:
+ rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1])
+ case nl.FRA_OIFNAME:
+ rule.OifName = string(attrs[j].Value[:len(attrs[j].Value)-1])
+ case nl.FRA_SUPPRESS_PREFIXLEN:
+ i := native.Uint32(attrs[j].Value[0:4])
+ if i != 0xffffffff {
+ rule.SuppressPrefixlen = int(i)
+ }
+ case nl.FRA_SUPPRESS_IFGROUP:
+ i := native.Uint32(attrs[j].Value[0:4])
+ if i != 0xffffffff {
+ rule.SuppressIfgroup = int(i)
+ }
+ case nl.FRA_FLOW:
+ rule.Flow = int(native.Uint32(attrs[j].Value[0:4]))
+ case nl.FRA_GOTO:
+ rule.Goto = int(native.Uint32(attrs[j].Value[0:4]))
+ case nl.FRA_PRIORITY:
+ rule.Priority = int(native.Uint32(attrs[j].Value[0:4]))
+ }
+ }
+ res = append(res, *rule)
+ }
+
+ return res, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/socket.go b/vendor/github.com/vishvananda/netlink/socket.go
new file mode 100644
index 000000000..41aa72624
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/socket.go
@@ -0,0 +1,27 @@
+package netlink
+
+import "net"
+
+// SocketID identifies a single socket.
+type SocketID struct {
+ SourcePort uint16
+ DestinationPort uint16
+ Source net.IP
+ Destination net.IP
+ Interface uint32
+ Cookie [2]uint32
+}
+
+// Socket represents a netlink socket.
+type Socket struct {
+ Family uint8
+ State uint8
+ Timer uint8
+ Retrans uint8
+ ID SocketID
+ Expires uint32
+ RQueue uint32
+ WQueue uint32
+ UID uint32
+ INode uint32
+}
diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go
new file mode 100644
index 000000000..b42b84f0c
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/socket_linux.go
@@ -0,0 +1,159 @@
+package netlink
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+const (
+ sizeofSocketID = 0x30
+ sizeofSocketRequest = sizeofSocketID + 0x8
+ sizeofSocket = sizeofSocketID + 0x18
+)
+
+type socketRequest struct {
+ Family uint8
+ Protocol uint8
+ Ext uint8
+ pad uint8
+ States uint32
+ ID SocketID
+}
+
+type writeBuffer struct {
+ Bytes []byte
+ pos int
+}
+
+func (b *writeBuffer) Write(c byte) {
+ b.Bytes[b.pos] = c
+ b.pos++
+}
+
+func (b *writeBuffer) Next(n int) []byte {
+ s := b.Bytes[b.pos : b.pos+n]
+ b.pos += n
+ return s
+}
+
+func (r *socketRequest) Serialize() []byte {
+ b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)}
+ b.Write(r.Family)
+ b.Write(r.Protocol)
+ b.Write(r.Ext)
+ b.Write(r.pad)
+ native.PutUint32(b.Next(4), r.States)
+ networkOrder.PutUint16(b.Next(2), r.ID.SourcePort)
+ networkOrder.PutUint16(b.Next(2), r.ID.DestinationPort)
+ copy(b.Next(4), r.ID.Source.To4())
+ b.Next(12)
+ copy(b.Next(4), r.ID.Destination.To4())
+ b.Next(12)
+ native.PutUint32(b.Next(4), r.ID.Interface)
+ native.PutUint32(b.Next(4), r.ID.Cookie[0])
+ native.PutUint32(b.Next(4), r.ID.Cookie[1])
+ return b.Bytes
+}
+
+func (r *socketRequest) Len() int { return sizeofSocketRequest }
+
+type readBuffer struct {
+ Bytes []byte
+ pos int
+}
+
+func (b *readBuffer) Read() byte {
+ c := b.Bytes[b.pos]
+ b.pos++
+ return c
+}
+
+func (b *readBuffer) Next(n int) []byte {
+ s := b.Bytes[b.pos : b.pos+n]
+ b.pos += n
+ return s
+}
+
+func (s *Socket) deserialize(b []byte) error {
+ if len(b) < sizeofSocket {
+ return fmt.Errorf("socket data short read (%d); want %d", len(b), sizeofSocket)
+ }
+ rb := readBuffer{Bytes: b}
+ s.Family = rb.Read()
+ s.State = rb.Read()
+ s.Timer = rb.Read()
+ s.Retrans = rb.Read()
+ s.ID.SourcePort = networkOrder.Uint16(rb.Next(2))
+ s.ID.DestinationPort = networkOrder.Uint16(rb.Next(2))
+ s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read())
+ rb.Next(12)
+ s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read())
+ rb.Next(12)
+ s.ID.Interface = native.Uint32(rb.Next(4))
+ s.ID.Cookie[0] = native.Uint32(rb.Next(4))
+ s.ID.Cookie[1] = native.Uint32(rb.Next(4))
+ s.Expires = native.Uint32(rb.Next(4))
+ s.RQueue = native.Uint32(rb.Next(4))
+ s.WQueue = native.Uint32(rb.Next(4))
+ s.UID = native.Uint32(rb.Next(4))
+ s.INode = native.Uint32(rb.Next(4))
+ return nil
+}
+
+// SocketGet returns the Socket identified by its local and remote addresses.
+func SocketGet(local, remote net.Addr) (*Socket, error) {
+ localTCP, ok := local.(*net.TCPAddr)
+ if !ok {
+ return nil, ErrNotImplemented
+ }
+ remoteTCP, ok := remote.(*net.TCPAddr)
+ if !ok {
+ return nil, ErrNotImplemented
+ }
+ localIP := localTCP.IP.To4()
+ if localIP == nil {
+ return nil, ErrNotImplemented
+ }
+ remoteIP := remoteTCP.IP.To4()
+ if remoteIP == nil {
+ return nil, ErrNotImplemented
+ }
+
+ s, err := nl.Subscribe(syscall.NETLINK_INET_DIAG)
+ if err != nil {
+ return nil, err
+ }
+ defer s.Close()
+ req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0)
+ req.AddData(&socketRequest{
+ Family: syscall.AF_INET,
+ Protocol: syscall.IPPROTO_TCP,
+ ID: SocketID{
+ SourcePort: uint16(localTCP.Port),
+ DestinationPort: uint16(remoteTCP.Port),
+ Source: localIP,
+ Destination: remoteIP,
+ Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE},
+ },
+ })
+ s.Send(req)
+ msgs, err := s.Receive()
+ if err != nil {
+ return nil, err
+ }
+ if len(msgs) == 0 {
+ return nil, errors.New("no message nor error from netlink")
+ }
+ if len(msgs) > 2 {
+ return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs))
+ }
+ sock := &Socket{}
+ if err := sock.deserialize(msgs[0].Data); err != nil {
+ return nil, err
+ }
+ return sock, nil
+}
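+
+// Illustrative usage (a sketch; both addresses are assumptions and must belong
+// to an established IPv4 TCP connection on this host):
+//
+//	local := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080}
+//	remote := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 45678}
+//	sock, err := netlink.SocketGet(local, remote)
+//	if err == nil {
+//		fmt.Println("uid:", sock.UID, "inode:", sock.INode)
+//	}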
diff --git a/vendor/github.com/vishvananda/netlink/xfrm.go b/vendor/github.com/vishvananda/netlink/xfrm.go
new file mode 100644
index 000000000..9962dcf70
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm.go
@@ -0,0 +1,74 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+)
+
+// Proto is an enum representing an ipsec protocol.
+type Proto uint8
+
+const (
+ XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING
+ XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP
+ XFRM_PROTO_AH Proto = syscall.IPPROTO_AH
+ XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS
+ XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin
+ XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW
+)
+
+func (p Proto) String() string {
+ switch p {
+ case XFRM_PROTO_ROUTE2:
+ return "route2"
+ case XFRM_PROTO_ESP:
+ return "esp"
+ case XFRM_PROTO_AH:
+ return "ah"
+ case XFRM_PROTO_HAO:
+ return "hao"
+ case XFRM_PROTO_COMP:
+ return "comp"
+ case XFRM_PROTO_IPSEC_ANY:
+ return "ipsec-any"
+ }
+ return fmt.Sprintf("%d", p)
+}
+
+// Mode is an enum representing an ipsec transport.
+type Mode uint8
+
+const (
+ XFRM_MODE_TRANSPORT Mode = iota
+ XFRM_MODE_TUNNEL
+ XFRM_MODE_ROUTEOPTIMIZATION
+ XFRM_MODE_IN_TRIGGER
+ XFRM_MODE_BEET
+ XFRM_MODE_MAX
+)
+
+func (m Mode) String() string {
+ switch m {
+ case XFRM_MODE_TRANSPORT:
+ return "transport"
+ case XFRM_MODE_TUNNEL:
+ return "tunnel"
+ case XFRM_MODE_ROUTEOPTIMIZATION:
+ return "ro"
+ case XFRM_MODE_IN_TRIGGER:
+ return "in_trigger"
+ case XFRM_MODE_BEET:
+ return "beet"
+ }
+ return fmt.Sprintf("%d", m)
+}
+
+// XfrmMark represents the mark associated to the state or policy
+type XfrmMark struct {
+ Value uint32
+ Mask uint32
+}
+
+func (m *XfrmMark) String() string {
+ return fmt.Sprintf("(0x%x,0x%x)", m.Value, m.Mask)
+}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go
new file mode 100644
index 000000000..7b98c9cb6
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go
@@ -0,0 +1,98 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netns"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+type XfrmMsg interface {
+ Type() nl.XfrmMsgType
+}
+
+type XfrmMsgExpire struct {
+ XfrmState *XfrmState
+ Hard bool
+}
+
+func (ue *XfrmMsgExpire) Type() nl.XfrmMsgType {
+ return nl.XFRM_MSG_EXPIRE
+}
+
+func parseXfrmMsgExpire(b []byte) *XfrmMsgExpire {
+ var e XfrmMsgExpire
+
+ msg := nl.DeserializeXfrmUserExpire(b)
+ e.XfrmState = xfrmStateFromXfrmUsersaInfo(&msg.XfrmUsersaInfo)
+ e.Hard = msg.Hard == 1
+
+ return &e
+}
+
+func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error,
+ types ...nl.XfrmMsgType) error {
+
+ groups, err := xfrmMcastGroups(types)
+ if err != nil {
+ return err
+ }
+ s, err := nl.SubscribeAt(netns.None(), netns.None(), syscall.NETLINK_XFRM, groups...)
+ if err != nil {
+ return err
+ }
+
+ if done != nil {
+ go func() {
+ <-done
+ s.Close()
+ }()
+ }
+
+ go func() {
+ defer close(ch)
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ errorChan <- err
+ return
+ }
+ for _, m := range msgs {
+ switch m.Header.Type {
+ case nl.XFRM_MSG_EXPIRE:
+ ch <- parseXfrmMsgExpire(m.Data)
+ default:
+ errorChan <- fmt.Errorf("unsupported msg type: %x", m.Header.Type)
+ }
+ }
+ }
+ }()
+
+ return nil
+}
+
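+// Illustrative usage (a sketch; nl is github.com/vishvananda/netlink/nl):
+//
+//	ch := make(chan netlink.XfrmMsg)
+//	errc := make(chan error)
+//	done := make(chan struct{})
+//	if err := netlink.XfrmMonitor(ch, done, errc, nl.XFRM_MSG_EXPIRE); err == nil {
+//		for msg := range ch {
+//			fmt.Println("expired:", msg.(*netlink.XfrmMsgExpire).XfrmState)
+//		}
+//	}
+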
+func xfrmMcastGroups(types []nl.XfrmMsgType) ([]uint, error) {
+ groups := make([]uint, 0)
+
+ if len(types) == 0 {
+ return nil, fmt.Errorf("no xfrm msg type specified")
+ }
+
+ for _, t := range types {
+ var group uint
+
+ switch t {
+ case nl.XFRM_MSG_EXPIRE:
+ group = nl.XFRMNLGRP_EXPIRE
+ default:
+ return nil, fmt.Errorf("unsupported group: %x", t)
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go
new file mode 100644
index 000000000..c97ec43a2
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy.go
@@ -0,0 +1,74 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Dir is an enum representing an ipsec template direction.
+type Dir uint8
+
+const (
+ XFRM_DIR_IN Dir = iota
+ XFRM_DIR_OUT
+ XFRM_DIR_FWD
+ XFRM_SOCKET_IN
+ XFRM_SOCKET_OUT
+ XFRM_SOCKET_FWD
+)
+
+func (d Dir) String() string {
+ switch d {
+ case XFRM_DIR_IN:
+ return "dir in"
+ case XFRM_DIR_OUT:
+ return "dir out"
+ case XFRM_DIR_FWD:
+ return "dir fwd"
+ case XFRM_SOCKET_IN:
+ return "socket in"
+ case XFRM_SOCKET_OUT:
+ return "socket out"
+ case XFRM_SOCKET_FWD:
+ return "socket fwd"
+ }
+ return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
+}
+
+// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
+// policy. These rules are matched with XfrmState to determine encryption
+// and authentication algorithms.
+type XfrmPolicyTmpl struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Spi int
+ Reqid int
+}
+
+func (t XfrmPolicyTmpl) String() string {
+ return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}",
+ t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid)
+}
+
+// XfrmPolicy represents an ipsec policy. It represents the overlay network
+// and has a list of XfrmPolicyTmpls representing the base addresses of
+// the policy.
+type XfrmPolicy struct {
+ Dst *net.IPNet
+ Src *net.IPNet
+ Proto Proto
+ DstPort int
+ SrcPort int
+ Dir Dir
+ Priority int
+ Index int
+ Mark *XfrmMark
+ Tmpls []XfrmPolicyTmpl
+}
+
+func (p XfrmPolicy) String() string {
+ return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Mark: %s, Tmpls: %s}",
+ p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Mark, p.Tmpls)
+}
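+
+// Illustrative policy (a sketch; all addresses, the SPI, and the reqid are assumptions):
+//
+//	_, inner, _ := net.ParseCIDR("10.0.0.0/24")
+//	_, peer, _ := net.ParseCIDR("10.0.1.0/24")
+//	policy := &netlink.XfrmPolicy{
+//		Src: inner, Dst: peer, Dir: netlink.XFRM_DIR_OUT,
+//		Tmpls: []netlink.XfrmPolicyTmpl{{
+//			Src:   net.ParseIP("192.168.0.1"),
+//			Dst:   net.ParseIP("192.168.0.2"),
+//			Proto: netlink.XFRM_PROTO_ESP,
+//			Mode:  netlink.XFRM_MODE_TUNNEL,
+//			Spi:   0x100,
+//			Reqid: 1,
+//		}},
+//	}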
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
new file mode 100644
index 000000000..c3d4e4222
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -0,0 +1,257 @@
+package netlink
+
+import (
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
+ sel.Family = uint16(nl.FAMILY_V4)
+ if policy.Dst != nil {
+ sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP))
+ sel.Daddr.FromIP(policy.Dst.IP)
+ prefixlenD, _ := policy.Dst.Mask.Size()
+ sel.PrefixlenD = uint8(prefixlenD)
+ }
+ if policy.Src != nil {
+ sel.Saddr.FromIP(policy.Src.IP)
+ prefixlenS, _ := policy.Src.Mask.Size()
+ sel.PrefixlenS = uint8(prefixlenS)
+ }
+ sel.Proto = uint8(policy.Proto)
+ sel.Dport = nl.Swap16(uint16(policy.DstPort))
+ sel.Sport = nl.Swap16(uint16(policy.SrcPort))
+ if sel.Dport != 0 {
+ sel.DportMask = ^uint16(0)
+ }
+ if sel.Sport != 0 {
+ sel.SportMask = ^uint16(0)
+ }
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+ return pkgHandle.XfrmPolicyAdd(policy)
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func (h *Handle) XfrmPolicyAdd(policy *XfrmPolicy) error {
+ return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_NEWPOLICY)
+}
+
+// XfrmPolicyUpdate will update an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy update $policy`
+func XfrmPolicyUpdate(policy *XfrmPolicy) error {
+ return pkgHandle.XfrmPolicyUpdate(policy)
+}
+
+// XfrmPolicyUpdate will update an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy update $policy`
+func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error {
+ return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_UPDPOLICY)
+}
+
+func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error {
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserpolicyInfo{}
+ selFromPolicy(&msg.Sel, policy)
+ msg.Priority = uint32(policy.Priority)
+ msg.Index = uint32(policy.Index)
+ msg.Dir = uint8(policy.Dir)
+ msg.Lft.SoftByteLimit = nl.XFRM_INF
+ msg.Lft.HardByteLimit = nl.XFRM_INF
+ msg.Lft.SoftPacketLimit = nl.XFRM_INF
+ msg.Lft.HardPacketLimit = nl.XFRM_INF
+ req.AddData(msg)
+
+ tmplData := make([]byte, nl.SizeofXfrmUserTmpl*len(policy.Tmpls))
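+ // nl.DeserializeXfrmUserTmpl returns a struct view backed by the slice,
+ // so the assignments in the loop below serialize each template into
+ // tmplData in place.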
+ for i, tmpl := range policy.Tmpls {
+ start := i * nl.SizeofXfrmUserTmpl
+ userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl])
+ userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst)
+ userTmpl.Saddr.FromIP(tmpl.Src)
+ userTmpl.XfrmId.Proto = uint8(tmpl.Proto)
+ userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi))
+ userTmpl.Mode = uint8(tmpl.Mode)
+ userTmpl.Reqid = uint32(tmpl.Reqid)
+ userTmpl.Aalgos = ^uint32(0)
+ userTmpl.Ealgos = ^uint32(0)
+ userTmpl.Calgos = ^uint32(0)
+ }
+ if len(tmplData) > 0 {
+ tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData)
+ req.AddData(tmpls)
+ }
+ if policy.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark))
+ req.AddData(out)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+ return pkgHandle.XfrmPolicyDel(policy)
+}
+
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func (h *Handle) XfrmPolicyDel(policy *XfrmPolicy) error {
+ _, err := h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_DELPOLICY)
+ return err
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ return pkgHandle.XfrmPolicyList(family)
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP)
+
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []XfrmPolicy
+ for _, m := range msgs {
+ if policy, err := parseXfrmPolicy(m, family); err == nil {
+ res = append(res, *policy)
+ } else if err == familyError {
+ continue
+ } else {
+ return nil, err
+ }
+ }
+ return res, nil
+}
+
+// XfrmPolicyGet gets the policy described by the index or selector, if found.
+// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`.
+func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) {
+ return pkgHandle.XfrmPolicyGet(policy)
+}
+
+// XfrmPolicyGet gets the policy described by the index or selector, if found.
+// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`.
+func (h *Handle) XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) {
+ return h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_GETPOLICY)
+}
+
+// XfrmPolicyFlush will flush the policies on the system.
+// Equivalent to: `ip xfrm policy flush`
+func XfrmPolicyFlush() error {
+ return pkgHandle.XfrmPolicyFlush()
+}
+
+// XfrmPolicyFlush will flush the policies on the system.
+// Equivalent to: `ip xfrm policy flush`
+func (h *Handle) XfrmPolicyFlush() error {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, syscall.NLM_F_ACK)
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) {
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserpolicyId{}
+ selFromPolicy(&msg.Sel, policy)
+ msg.Index = uint32(policy.Index)
+ msg.Dir = uint8(policy.Dir)
+ req.AddData(msg)
+
+ if policy.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark))
+ req.AddData(out)
+ }
+
+ resType := nl.XFRM_MSG_NEWPOLICY
+ if nlProto == nl.XFRM_MSG_DELPOLICY {
+ resType = 0
+ }
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType))
+ if err != nil {
+ return nil, err
+ }
+
+ if nlProto == nl.XFRM_MSG_DELPOLICY {
+ return nil, nil
+ }
+
+ p, err := parseXfrmPolicy(msgs[0], FAMILY_ALL)
+ if err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) {
+ msg := nl.DeserializeXfrmUserpolicyInfo(m)
+
+ // This is mainly for the policy dump
+ if family != FAMILY_ALL && family != int(msg.Sel.Family) {
+ return nil, familyError
+ }
+
+ var policy XfrmPolicy
+
+ policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD)
+ policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS)
+ policy.Proto = Proto(msg.Sel.Proto)
+ policy.DstPort = int(nl.Swap16(msg.Sel.Dport))
+ policy.SrcPort = int(nl.Swap16(msg.Sel.Sport))
+ policy.Priority = int(msg.Priority)
+ policy.Index = int(msg.Index)
+ policy.Dir = Dir(msg.Dir)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_TMPL:
+ max := len(attr.Value)
+ for i := 0; i < max; i += nl.SizeofXfrmUserTmpl {
+ var resTmpl XfrmPolicyTmpl
+ tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl])
+ resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP()
+ resTmpl.Src = tmpl.Saddr.ToIP()
+ resTmpl.Proto = Proto(tmpl.XfrmId.Proto)
+ resTmpl.Mode = Mode(tmpl.Mode)
+ resTmpl.Spi = int(nl.Swap32(tmpl.XfrmId.Spi))
+ resTmpl.Reqid = int(tmpl.Reqid)
+ policy.Tmpls = append(policy.Tmpls, resTmpl)
+ }
+ case nl.XFRMA_MARK:
+ mark := nl.DeserializeXfrmMark(attr.Value[:])
+ policy.Mark = new(XfrmMark)
+ policy.Mark.Value = mark.Value
+ policy.Mark.Mask = mark.Mask
+ }
+ }
+
+ return &policy, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go
new file mode 100644
index 000000000..368a9b986
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go
@@ -0,0 +1,108 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// XfrmStateAlgo represents the algorithm to use for the ipsec encryption.
+type XfrmStateAlgo struct {
+ Name string
+ Key []byte
+ TruncateLen int // Auth only
+ ICVLen int // AEAD only
+}
+
+func (a XfrmStateAlgo) String() string {
+ base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key)
+ if a.TruncateLen != 0 {
+ base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen)
+ }
+ if a.ICVLen != 0 {
+ base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen)
+ }
+ return fmt.Sprintf("%s}", base)
+}
+
+// EncapType is an enum representing the optional packet encapsulation.
+type EncapType uint8
+
+const (
+ XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1
+ XFRM_ENCAP_ESPINUDP
+)
+
+func (e EncapType) String() string {
+ switch e {
+ case XFRM_ENCAP_ESPINUDP_NONIKE:
+ return "espinudp-non-ike"
+ case XFRM_ENCAP_ESPINUDP:
+ return "espinudp"
+ }
+ return "unknown"
+}
+
+// XfrmStateEncap represents the encapsulation to use for the ipsec encryption.
+type XfrmStateEncap struct {
+ Type EncapType
+ SrcPort int
+ DstPort int
+ OriginalAddress net.IP
+}
+
+func (e XfrmStateEncap) String() string {
+ return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}",
+ e.Type, e.SrcPort, e.DstPort, e.OriginalAddress)
+}
+
+// XfrmStateLimits represents the configured limits for the state.
+type XfrmStateLimits struct {
+ ByteSoft uint64
+ ByteHard uint64
+ PacketSoft uint64
+ PacketHard uint64
+ TimeSoft uint64
+ TimeHard uint64
+ TimeUseSoft uint64
+ TimeUseHard uint64
+}
+
+// XfrmState represents the state of an ipsec policy. It optionally
+// contains an XfrmStateAlgo for encryption and one for authentication.
+type XfrmState struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Spi int
+ Reqid int
+ ReplayWindow int
+ Limits XfrmStateLimits
+ Mark *XfrmMark
+ Auth *XfrmStateAlgo
+ Crypt *XfrmStateAlgo
+ Aead *XfrmStateAlgo
+ Encap *XfrmStateEncap
+ ESN bool
+}
+
+func (sa XfrmState) String() string {
+ return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t",
+ sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN)
+}
+
+func (sa XfrmState) Print(stats bool) string {
+ if !stats {
+ return sa.String()
+ }
+
+ return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d",
+ sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard),
+ sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard)
+}
+
+func printLimit(lmt uint64) string {
+ if lmt == ^uint64(0) {
+ return "(INF)"
+ }
+ return fmt.Sprintf("%d", lmt)
+}
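+
+// Illustrative state (a sketch; the addresses, SPI, reqid, and all-zero key are dummies):
+//
+//	state := &netlink.XfrmState{
+//		Src:   net.ParseIP("192.168.0.1"),
+//		Dst:   net.ParseIP("192.168.0.2"),
+//		Proto: netlink.XFRM_PROTO_ESP,
+//		Mode:  netlink.XFRM_MODE_TUNNEL,
+//		Spi:   0x100,
+//		Reqid: 1,
+//		Crypt: &netlink.XfrmStateAlgo{Name: "cbc(aes)", Key: make([]byte, 16)},
+//	}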
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
new file mode 100644
index 000000000..6a7bc0dec
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -0,0 +1,444 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func writeStateAlgo(a *XfrmStateAlgo) []byte {
+ algo := nl.XfrmAlgo{
+ AlgKeyLen: uint32(len(a.Key) * 8),
+ AlgKey: a.Key,
+ }
+ end := len(a.Name)
+ if end > 64 {
+ end = 64
+ }
+ copy(algo.AlgName[:end], a.Name)
+ return algo.Serialize()
+}
+
+func writeStateAlgoAuth(a *XfrmStateAlgo) []byte {
+ algo := nl.XfrmAlgoAuth{
+ AlgKeyLen: uint32(len(a.Key) * 8),
+ AlgTruncLen: uint32(a.TruncateLen),
+ AlgKey: a.Key,
+ }
+ end := len(a.Name)
+ if end > 64 {
+ end = 64
+ }
+ copy(algo.AlgName[:end], a.Name)
+ return algo.Serialize()
+}
+
+func writeStateAlgoAead(a *XfrmStateAlgo) []byte {
+ algo := nl.XfrmAlgoAEAD{
+ AlgKeyLen: uint32(len(a.Key) * 8),
+ AlgICVLen: uint32(a.ICVLen),
+ AlgKey: a.Key,
+ }
+ end := len(a.Name)
+ if end > 64 {
+ end = 64
+ }
+ copy(algo.AlgName[:end], a.Name)
+ return algo.Serialize()
+}
+
+func writeMark(m *XfrmMark) []byte {
+ mark := &nl.XfrmMark{
+ Value: m.Value,
+ Mask: m.Mask,
+ }
+ if mark.Mask == 0 {
+ mark.Mask = ^uint32(0)
+ }
+ return mark.Serialize()
+}
+
+func writeReplayEsn(replayWindow int) []byte {
+ replayEsn := &nl.XfrmReplayStateEsn{
+ OSeq: 0,
+ Seq: 0,
+ OSeqHi: 0,
+ SeqHi: 0,
+ ReplayWindow: uint32(replayWindow),
+ }
+
+ // taken from iproute2/ip/xfrm_state.c:
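+ // BmpLen is the number of 32-bit words needed for the replay bitmap,
+ // e.g. a 128-packet window needs (128+31)/32 = 4 words.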
+ replayEsn.BmpLen = uint32((replayWindow + (4 * 8) - 1) / (4 * 8))
+
+ return replayEsn.Serialize()
+}
+
+// XfrmStateAdd will add an xfrm state to the system.
+// Equivalent to: `ip xfrm state add $state`
+func XfrmStateAdd(state *XfrmState) error {
+ return pkgHandle.XfrmStateAdd(state)
+}
+
+// XfrmStateAdd will add an xfrm state to the system.
+// Equivalent to: `ip xfrm state add $state`
+func (h *Handle) XfrmStateAdd(state *XfrmState) error {
+ return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_NEWSA)
+}
+
+// XfrmStateAllocSpi will allocate an xfrm state in the system.
+// Equivalent to: `ip xfrm state allocspi`
+func XfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) {
+ return pkgHandle.xfrmStateAllocSpi(state)
+}
+
+// XfrmStateUpdate will update an xfrm state to the system.
+// Equivalent to: `ip xfrm state update $state`
+func XfrmStateUpdate(state *XfrmState) error {
+ return pkgHandle.XfrmStateUpdate(state)
+}
+
+// XfrmStateUpdate will update an xfrm state to the system.
+// Equivalent to: `ip xfrm state update $state`
+func (h *Handle) XfrmStateUpdate(state *XfrmState) error {
+ return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_UPDSA)
+}
+
+func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error {
+ // A state with SPI 0 can't be deleted, so don't allow it to be set.
+ if state.Spi == 0 {
+ return fmt.Errorf("spi must be set when adding xfrm state")
+ }
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := xfrmUsersaInfoFromXfrmState(state)
+
+ if state.ESN {
+ if state.ReplayWindow == 0 {
+ return fmt.Errorf("ESN flag set without ReplayWindow")
+ }
+ msg.Flags |= nl.XFRM_STATE_ESN
+ msg.ReplayWindow = 0
+ }
+
+ limitsToLft(state.Limits, &msg.Lft)
+ req.AddData(msg)
+
+ if state.Auth != nil {
+ out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth))
+ req.AddData(out)
+ }
+ if state.Crypt != nil {
+ out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt))
+ req.AddData(out)
+ }
+ if state.Aead != nil {
+ out := nl.NewRtAttr(nl.XFRMA_ALG_AEAD, writeStateAlgoAead(state.Aead))
+ req.AddData(out)
+ }
+ if state.Encap != nil {
+ encapData := make([]byte, nl.SizeofXfrmEncapTmpl)
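+ // The deserialized struct is a view over encapData, so the assignments
+ // below fill the attribute payload in place.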
+ encap := nl.DeserializeXfrmEncapTmpl(encapData)
+ encap.EncapType = uint16(state.Encap.Type)
+ encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort))
+ encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort))
+ encap.EncapOa.FromIP(state.Encap.OriginalAddress)
+ out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData)
+ req.AddData(out)
+ }
+ if state.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark))
+ req.AddData(out)
+ }
+ if state.ESN {
+ out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow))
+ req.AddData(out)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_ALLOCSPI,
+ syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserSpiInfo{}
+ msg.XfrmUsersaInfo = *(xfrmUsersaInfoFromXfrmState(state))
+ // 1-255 is reserved by IANA for future use
+ msg.Min = 0x100
+ msg.Max = 0xffffffff
+ req.AddData(msg)
+
+ if state.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark))
+ req.AddData(out)
+ }
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ s, err := parseXfrmState(msgs[0], FAMILY_ALL)
+ if err != nil {
+ return nil, err
+ }
+
+ return s, err
+}
+
+// XfrmStateDel will delete an xfrm state from the system. Note that
+// the Algos are ignored when matching the state to delete.
+// Equivalent to: `ip xfrm state del $state`
+func XfrmStateDel(state *XfrmState) error {
+ return pkgHandle.XfrmStateDel(state)
+}
+
+// XfrmStateDel will delete an xfrm state from the system. Note that
+// the Algos are ignored when matching the state to delete.
+// Equivalent to: `ip xfrm state del $state`
+func (h *Handle) XfrmStateDel(state *XfrmState) error {
+ _, err := h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_DELSA)
+ return err
+}
+
+// XfrmStateList gets a list of xfrm states in the system.
+// Equivalent to: `ip [-4|-6] xfrm state show`.
+// The list can be filtered by ip family.
+func XfrmStateList(family int) ([]XfrmState, error) {
+ return pkgHandle.XfrmStateList(family)
+}
+
+// XfrmStateList gets a list of xfrm states in the system.
+// Equivalent to: `ip xfrm state show`.
+// The list can be filtered by ip family.
+func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []XfrmState
+ for _, m := range msgs {
+ if state, err := parseXfrmState(m, family); err == nil {
+ res = append(res, *state)
+ } else if err == familyError {
+ continue
+ } else {
+ return nil, err
+ }
+ }
+ return res, nil
+}
+
+// XfrmStateGet gets the xfrm state described by the ID, if found.
+// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`.
+// Only the fields which constitute the SA ID must be filled in:
+// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ]
+// mark is optional
+func XfrmStateGet(state *XfrmState) (*XfrmState, error) {
+ return pkgHandle.XfrmStateGet(state)
+}
+
+// XfrmStateGet gets the xfrm state described by the ID, if found.
+// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`.
+// Only the fields which constitute the SA ID must be filled in:
+// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ]
+// mark is optional
+func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) {
+ return h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_GETSA)
+}
+
+func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) {
+ req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUsersaId{}
+ msg.Family = uint16(nl.GetIPFamily(state.Dst))
+ msg.Daddr.FromIP(state.Dst)
+ msg.Proto = uint8(state.Proto)
+ msg.Spi = nl.Swap32(uint32(state.Spi))
+ req.AddData(msg)
+
+ if state.Mark != nil {
+ out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark))
+ req.AddData(out)
+ }
+ if state.Src != nil {
+ out := nl.NewRtAttr(nl.XFRMA_SRCADDR, state.Src.To16())
+ req.AddData(out)
+ }
+
+ resType := nl.XFRM_MSG_NEWSA
+ if nlProto == nl.XFRM_MSG_DELSA {
+ resType = 0
+ }
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType))
+ if err != nil {
+ return nil, err
+ }
+
+ if nlProto == nl.XFRM_MSG_DELSA {
+ return nil, nil
+ }
+
+ s, err := parseXfrmState(msgs[0], FAMILY_ALL)
+ if err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+var familyError = fmt.Errorf("family error")
+
+func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState {
+ var state XfrmState
+
+ state.Dst = msg.Id.Daddr.ToIP()
+ state.Src = msg.Saddr.ToIP()
+ state.Proto = Proto(msg.Id.Proto)
+ state.Mode = Mode(msg.Mode)
+ state.Spi = int(nl.Swap32(msg.Id.Spi))
+ state.Reqid = int(msg.Reqid)
+ state.ReplayWindow = int(msg.ReplayWindow)
+ lftToLimits(&msg.Lft, &state.Limits)
+
+ return &state
+}
+
+func parseXfrmState(m []byte, family int) (*XfrmState, error) {
+ msg := nl.DeserializeXfrmUsersaInfo(m)
+
+ // This is mainly for the state dump
+ if family != FAMILY_ALL && family != int(msg.Family) {
+ return nil, familyError
+ }
+
+ state := xfrmStateFromXfrmUsersaInfo(msg)
+
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
+ var resAlgo *XfrmStateAlgo
+ if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
+ if state.Auth == nil {
+ state.Auth = new(XfrmStateAlgo)
+ }
+ resAlgo = state.Auth
+ } else {
+ state.Crypt = new(XfrmStateAlgo)
+ resAlgo = state.Crypt
+ }
+ algo := nl.DeserializeXfrmAlgo(attr.Value[:])
+ resAlgo.Name = nl.BytesToString(algo.AlgName[:])
+ resAlgo.Key = algo.AlgKey
+ case nl.XFRMA_ALG_AUTH_TRUNC:
+ if state.Auth == nil {
+ state.Auth = new(XfrmStateAlgo)
+ }
+ algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:])
+ state.Auth.Name = nl.BytesToString(algo.AlgName[:])
+ state.Auth.Key = algo.AlgKey
+ state.Auth.TruncateLen = int(algo.AlgTruncLen)
+ case nl.XFRMA_ALG_AEAD:
+ state.Aead = new(XfrmStateAlgo)
+ algo := nl.DeserializeXfrmAlgoAEAD(attr.Value[:])
+ state.Aead.Name = nl.BytesToString(algo.AlgName[:])
+ state.Aead.Key = algo.AlgKey
+ state.Aead.ICVLen = int(algo.AlgICVLen)
+ case nl.XFRMA_ENCAP:
+ encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
+ state.Encap = new(XfrmStateEncap)
+ state.Encap.Type = EncapType(encap.EncapType)
+ state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport))
+ state.Encap.DstPort = int(nl.Swap16(encap.EncapDport))
+ state.Encap.OriginalAddress = encap.EncapOa.ToIP()
+ case nl.XFRMA_MARK:
+ mark := nl.DeserializeXfrmMark(attr.Value[:])
+ state.Mark = new(XfrmMark)
+ state.Mark.Value = mark.Value
+ state.Mark.Mask = mark.Mask
+ }
+ }
+
+ return state, nil
+}
+
+// XfrmStateFlush will flush the xfrm state on the system.
+// proto = 0 means any transformation protocols
+// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]`
+func XfrmStateFlush(proto Proto) error {
+ return pkgHandle.XfrmStateFlush(proto)
+}
+
+// XfrmStateFlush will flush the xfrm state on the system.
+// proto = 0 means any transformation protocols
+// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]`
+func (h *Handle) XfrmStateFlush(proto Proto) error {
+ req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, syscall.NLM_F_ACK)
+
+ req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)})
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+func limitsToLft(lmts XfrmStateLimits, lft *nl.XfrmLifetimeCfg) {
+ if lmts.ByteSoft != 0 {
+ lft.SoftByteLimit = lmts.ByteSoft
+ } else {
+ lft.SoftByteLimit = nl.XFRM_INF
+ }
+ if lmts.ByteHard != 0 {
+ lft.HardByteLimit = lmts.ByteHard
+ } else {
+ lft.HardByteLimit = nl.XFRM_INF
+ }
+ if lmts.PacketSoft != 0 {
+ lft.SoftPacketLimit = lmts.PacketSoft
+ } else {
+ lft.SoftPacketLimit = nl.XFRM_INF
+ }
+ if lmts.PacketHard != 0 {
+ lft.HardPacketLimit = lmts.PacketHard
+ } else {
+ lft.HardPacketLimit = nl.XFRM_INF
+ }
+ lft.SoftAddExpiresSeconds = lmts.TimeSoft
+ lft.HardAddExpiresSeconds = lmts.TimeHard
+ lft.SoftUseExpiresSeconds = lmts.TimeUseSoft
+ lft.HardUseExpiresSeconds = lmts.TimeUseHard
+}
+
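+// lftToLimits relies on nl.XfrmLifetimeCfg and XfrmStateLimits having the
+// same memory layout (eight uint64 fields in the same order), so the unsafe
+// cast is a straight copy.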
+func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) {
+ *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft))
+}
+
+func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo {
+ msg := &nl.XfrmUsersaInfo{}
+ msg.Family = uint16(nl.GetIPFamily(state.Dst))
+ msg.Id.Daddr.FromIP(state.Dst)
+ msg.Saddr.FromIP(state.Src)
+ msg.Id.Proto = uint8(state.Proto)
+ msg.Mode = uint8(state.Mode)
+ msg.Id.Spi = nl.Swap32(uint32(state.Spi))
+ msg.Reqid = uint32(state.Reqid)
+ msg.ReplayWindow = uint8(state.ReplayWindow)
+
+ return msg
+}
diff --git a/vendor/github.com/vishvananda/netns/LICENSE b/vendor/github.com/vishvananda/netns/LICENSE
new file mode 100644
index 000000000..9f64db858
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Vishvananda Ishaya.
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/vishvananda/netns/README.md b/vendor/github.com/vishvananda/netns/README.md
new file mode 100644
index 000000000..66a5f7258
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/README.md
@@ -0,0 +1,51 @@
+# netns - network namespaces in go #
+
+The netns package provides an ultra-simple interface for handling
+network namespaces in Go. Changing namespaces requires elevated
+privileges, so in most cases this code needs to be run as root.
+
+## Local Build and Test ##
+
+You can use the go get command:
+
+ go get github.com/vishvananda/netns
+
+Testing (requires root):
+
+ sudo -E go test github.com/vishvananda/netns
+
+## Example ##
+
+```go
+package main
+
+import (
+ "fmt"
+ "net"
+ "runtime"
+ "github.com/vishvananda/netns"
+)
+
+func main() {
+ // Lock the OS Thread so we don't accidentally switch namespaces
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // Save the current network namespace
+ origns, _ := netns.Get()
+ defer origns.Close()
+
+ // Create a new network namespace
+ newns, _ := netns.New()
+ netns.Set(newns)
+ defer newns.Close()
+
+ // Do something with the network namespace
+ ifaces, _ := net.Interfaces()
+ fmt.Printf("Interfaces: %v\n", ifaces)
+
+ // Switch back to the original namespace
+ netns.Set(origns)
+}
+```
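+
+The example above drops errors for brevity. Below is a fuller sketch that
+checks them and enters a named namespace instead of a fresh one (it assumes
+a namespace already created with `ip netns add blue`; the name is only an
+illustration):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"runtime"
+
+	"github.com/vishvananda/netns"
+)
+
+func main() {
+	// Lock the OS Thread so we don't accidentally switch namespaces
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	// Save the current network namespace
+	origns, err := netns.Get()
+	if err != nil {
+		log.Fatalf("getting current namespace: %v", err)
+	}
+	defer origns.Close()
+
+	// Open a handle to the named namespace "blue" (illustrative name)
+	ns, err := netns.GetFromName("blue")
+	if err != nil {
+		log.Fatalf("opening namespace: %v", err)
+	}
+	defer ns.Close()
+
+	if err := netns.Set(ns); err != nil {
+		log.Fatalf("switching namespace: %v", err)
+	}
+	// Switch back before the deferred Closes run (defers run LIFO).
+	defer netns.Set(origns)
+
+	fmt.Println("now in namespace:", ns)
+}
+```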
diff --git a/vendor/github.com/vishvananda/netns/netns.go b/vendor/github.com/vishvananda/netns/netns.go
new file mode 100644
index 000000000..dd2f21570
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/netns.go
@@ -0,0 +1,80 @@
+// Package netns allows ultra-simple network namespace handling. NsHandles
+// can be retrieved and set. Note that the current namespace is thread
+// local, so actions that set and reset namespaces should use LockOSThread
+// to make sure the namespace doesn't change due to a goroutine switch.
+// It is best to close NsHandles when you are done with them. This can be
+// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
+// requires elevated privileges, so in most cases this code needs to be run
+// as root.
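+//
+// A typical usage pattern, roughly (errors dropped for brevity; see the
+// README for a complete example):
+//
+//	runtime.LockOSThread()
+//	defer runtime.UnlockOSThread()
+//	ns, _ := netns.Get()
+//	defer ns.Close()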
+package netns
+
+import (
+ "fmt"
+ "syscall"
+)
+
+// NsHandle is a handle to a network namespace. It can be cast directly
+// to an int and used as a file descriptor.
+type NsHandle int
+
+// Equal determines if two network namespace handles refer to the same
+// namespace. This is done by comparing the device and inode that the
+// file descriptors point to.
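+//
+// For example, handles returned by Get() and by GetFromThread with the
+// current pid and tid refer to the same namespace and compare Equal.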
+func (ns NsHandle) Equal(other NsHandle) bool {
+ if ns == other {
+ return true
+ }
+ var s1, s2 syscall.Stat_t
+ if err := syscall.Fstat(int(ns), &s1); err != nil {
+ return false
+ }
+ if err := syscall.Fstat(int(other), &s2); err != nil {
+ return false
+ }
+ return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino)
+}
+
+// String shows the file descriptor number and its dev and inode.
+func (ns NsHandle) String() string {
+ var s syscall.Stat_t
+ if ns == -1 {
+ return "NS(None)"
+ }
+ if err := syscall.Fstat(int(ns), &s); err != nil {
+ return fmt.Sprintf("NS(%d: unknown)", ns)
+ }
+ return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino)
+}
+
+// UniqueId returns a string that uniquely identifies the namespace
+// associated with the network handle.
+func (ns NsHandle) UniqueId() string {
+ var s syscall.Stat_t
+ if ns == -1 {
+ return "NS(none)"
+ }
+ if err := syscall.Fstat(int(ns), &s); err != nil {
+ return "NS(unknown)"
+ }
+ return fmt.Sprintf("NS(%d:%d)", s.Dev, s.Ino)
+}
+
+// IsOpen returns true if Close() has not been called.
+func (ns NsHandle) IsOpen() bool {
+ return ns != -1
+}
+
+// Close closes the NsHandle and resets its file descriptor to -1.
+// It is not safe to use an NsHandle after Close() is called.
+func (ns *NsHandle) Close() error {
+ if err := syscall.Close(int(*ns)); err != nil {
+ return err
+ }
+ (*ns) = -1
+ return nil
+}
+
+// None gets an empty (closed) NsHandle.
+func None() NsHandle {
+ return NsHandle(-1)
+}
diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go
new file mode 100644
index 000000000..a267c7105
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/netns_linux.go
@@ -0,0 +1,224 @@
+// +build linux
+
+package netns
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// SYS_SETNS maps each supported architecture to its setns syscall number;
+// setns changes a namespace of the calling thread.
+var SYS_SETNS = map[string]uintptr{
+ "386": 346,
+ "amd64": 308,
+ "arm64": 268,
+ "arm": 375,
+ "mips": 4344,
+ "mipsle": 4344,
+ "ppc64": 350,
+ "ppc64le": 350,
+ "s390x": 339,
+}[runtime.GOARCH]
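+
+// (Architectures missing from the map get the zero value, so on an unlisted
+// GOARCH, Setns would invoke syscall 0 rather than setns.)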
+
+// Deprecated: use the syscall package instead (Go >= 1.5 needed).
+const (
+ CLONE_NEWUTS = 0x04000000 /* New utsname group? */
+ CLONE_NEWIPC = 0x08000000 /* New ipcs */
+ CLONE_NEWUSER = 0x10000000 /* New user namespace */
+ CLONE_NEWPID = 0x20000000 /* New pid namespace */
+ CLONE_NEWNET = 0x40000000 /* New network namespace */
+ CLONE_IO = 0x80000000 /* Get io context */
+)
+
+// Setns sets the namespace using the raw syscall. Note that this should be
+// a function in the syscall package, but it has not been added.
+func Setns(ns NsHandle, nstype int) (err error) {
+ _, _, e1 := syscall.Syscall(SYS_SETNS, uintptr(ns), uintptr(nstype), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Set sets the current network namespace to the namespace represented
+// by NsHandle.
+func Set(ns NsHandle) (err error) {
+ return Setns(ns, CLONE_NEWNET)
+}
+
+// New creates a new network namespace, sets it as current for the calling
+// thread, and returns a handle to it.
+func New() (ns NsHandle, err error) {
+ if err := syscall.Unshare(CLONE_NEWNET); err != nil {
+ return -1, err
+ }
+ return Get()
+}
+
+// Get gets a handle to the current thread's network namespace.
+func Get() (NsHandle, error) {
+ return GetFromThread(os.Getpid(), syscall.Gettid())
+}
+
+// GetFromPath gets a handle to a network namespace identified by the
+// given path.
+func GetFromPath(path string) (NsHandle, error) {
+ fd, err := syscall.Open(path, syscall.O_RDONLY, 0)
+ if err != nil {
+ return -1, err
+ }
+ return NsHandle(fd), nil
+}
+
+// GetFromName gets a handle to a named network namespace such as one
+// created by `ip netns add`.
+func GetFromName(name string) (NsHandle, error) {
+ return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name))
+}
+
+// GetFromPid gets a handle to the network namespace of a given pid.
+func GetFromPid(pid int) (NsHandle, error) {
+ return GetFromPath(fmt.Sprintf("/proc/%d/ns/net", pid))
+}
+
+// GetFromThread gets a handle to the network namespace of a given pid and tid.
+func GetFromThread(pid, tid int) (NsHandle, error) {
+ return GetFromPath(fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid))
+}
+
+// GetFromDocker gets a handle to the network namespace of a docker container.
+// The id is prefix-matched against the running docker containers, so a short
+// identifier can be used as long as it isn't ambiguous.
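+//
+// For example, with a hypothetical short container id prefix:
+//
+//	ns, err := netns.GetFromDocker("1b5c")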
+func GetFromDocker(id string) (NsHandle, error) {
+ pid, err := getPidForContainer(id)
+ if err != nil {
+ return -1, err
+ }
+ return GetFromPid(pid)
+}
+
+// borrowed from docker/utils/utils.go
+func findCgroupMountpoint(cgroupType string) (string, error) {
+ output, err := ioutil.ReadFile("/proc/mounts")
+ if err != nil {
+ return "", err
+ }
+
+ // /proc/mounts has 6 fields per line, one mount per line, e.g.
+ // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
+ for _, line := range strings.Split(string(output), "\n") {
+ parts := strings.Split(line, " ")
+ if len(parts) == 6 && parts[2] == "cgroup" {
+ for _, opt := range strings.Split(parts[3], ",") {
+ if opt == cgroupType {
+ return parts[1], nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
+}
+
+// Returns the relative path to the cgroup the docker daemon is running in.
+// borrowed from docker/utils/utils.go
+// modified to get the docker pid instead of using /proc/self
+func getThisCgroup(cgroupType string) (string, error) {
+ dockerpid, err := ioutil.ReadFile("/var/run/docker.pid")
+ if err != nil {
+ return "", err
+ }
+ result := strings.Split(string(dockerpid), "\n")
+ if len(result) == 0 || len(result[0]) == 0 {
+ return "", fmt.Errorf("docker pid not found in /var/run/docker.pid")
+ }
+ pid, err := strconv.Atoi(result[0])
+ if err != nil {
+ return "", err
+ }
+ output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
+ if err != nil {
+ return "", err
+ }
+ for _, line := range strings.Split(string(output), "\n") {
+ parts := strings.Split(line, ":")
+ // any type used by docker should work
+ if parts[1] == cgroupType {
+ return parts[2], nil
+ }
+ }
+ return "", fmt.Errorf("cgroup '%s' not found in /proc/%d/cgroup", cgroupType, pid)
+}
+
+// Returns the first pid in a container.
+// borrowed from docker/utils/utils.go
+// modified to only return the first pid
+// modified to glob with id
+// modified to search for newer docker containers
+func getPidForContainer(id string) (int, error) {
+ pid := 0
+
+	// memory is chosen arbitrarily; any cgroup used by docker works
+ cgroupType := "memory"
+
+ cgroupRoot, err := findCgroupMountpoint(cgroupType)
+ if err != nil {
+ return pid, err
+ }
+
+ cgroupThis, err := getThisCgroup(cgroupType)
+ if err != nil {
+ return pid, err
+ }
+
+ id += "*"
+
+ attempts := []string{
+ filepath.Join(cgroupRoot, cgroupThis, id, "tasks"),
+		// With more recent lxc versions, the cgroup will be in lxc/
+		filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"),
+		// With more recent docker versions, the cgroup will be in docker/
+		filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"),
+		// Even more recent docker versions under systemd use docker-<id>.scope/
+		filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"),
+		// Even more recent docker versions put it under cgroup/systemd/docker/<id>/
+		filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"),
+ }
+
+ var filename string
+ for _, attempt := range attempts {
+ filenames, _ := filepath.Glob(attempt)
+ if len(filenames) > 1 {
+ return pid, fmt.Errorf("Ambiguous id supplied: %v", filenames)
+ } else if len(filenames) == 1 {
+ filename = filenames[0]
+ break
+ }
+ }
+
+ if filename == "" {
+ return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1])
+ }
+
+ output, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return pid, err
+ }
+
+ result := strings.Split(string(output), "\n")
+ if len(result) == 0 || len(result[0]) == 0 {
+ return pid, fmt.Errorf("No pid found for container")
+ }
+
+ pid, err = strconv.Atoi(result[0])
+ if err != nil {
+ return pid, fmt.Errorf("Invalid pid '%s': %s", result[0], err)
+ }
+
+ return pid, nil
+}
diff --git a/vendor/github.com/vishvananda/netns/netns_unspecified.go b/vendor/github.com/vishvananda/netns/netns_unspecified.go
new file mode 100644
index 000000000..d06af62b6
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/netns_unspecified.go
@@ -0,0 +1,43 @@
+// +build !linux
+
+package netns
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotImplemented = errors.New("not implemented")
+)
+
+func Set(ns NsHandle) (err error) {
+ return ErrNotImplemented
+}
+
+func New() (ns NsHandle, err error) {
+ return -1, ErrNotImplemented
+}
+
+func Get() (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromPath(path string) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromName(name string) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromPid(pid int) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromThread(pid, tid int) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}
+
+func GetFromDocker(id string) (NsHandle, error) {
+ return -1, ErrNotImplemented
+}